diff --git a/.clang-format b/.clang-format
index e58d518b3b..56ca83e724 100644
--- a/.clang-format
+++ b/.clang-format
@@ -88,4 +88,3 @@ Standard: Auto
TabWidth: 8
UseTab: Never
...
-
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000..912b302ad2
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+*.py linguist-detectable=false
diff --git a/.gitignore b/.gitignore
index 76b581b182..5f1e24109d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
build/
compile_commands.json
+CMakeSettings.json
.cache
.ycm_extra_conf.py
.tasks
diff --git a/CMakeSettings.json b/CMakeSettings.json
deleted file mode 100644
index d3f2c27bf6..0000000000
--- a/CMakeSettings.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
- "configurations": [
- {
- "name": "WSL-GCC-Debug",
- "generator": "Unix Makefiles",
- "configurationType": "Debug",
- "buildRoot": "${projectDir}\\build\\",
- "installRoot": "${projectDir}\\build\\",
- "cmakeExecutable": "/usr/bin/cmake",
- "cmakeCommandArgs": "",
- "buildCommandArgs": "",
- "ctestCommandArgs": "",
- "inheritEnvironments": [ "linux_x64" ],
- "wslPath": "${defaultWSLPath}",
- "addressSanitizerRuntimeFlags": "detect_leaks=0",
- "variables": [
- {
- "name": "CMAKE_INSTALL_PREFIX",
- "value": "/mnt/d/TDengine/TDengine/build",
- "type": "PATH"
- }
- ]
- }
- ]
-}
\ No newline at end of file
diff --git a/TDenginelogo.png b/TDenginelogo.png
deleted file mode 100644
index 19a92592d7..0000000000
Binary files a/TDenginelogo.png and /dev/null differ
diff --git a/cmake/cmake.platform b/cmake/cmake.platform
index 887fbd86d5..3aa1ffc07e 100644
--- a/cmake/cmake.platform
+++ b/cmake/cmake.platform
@@ -46,7 +46,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
MESSAGE("Current system processor is ${CMAKE_SYSTEM_PROCESSOR}.")
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64")
- MESSAGE("Current system arch is arm64")
+ MESSAGE("Current system arch is 64")
SET(TD_DARWIN_64 TRUE)
ADD_DEFINITIONS("-D_TD_DARWIN_64")
ENDIF ()
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index c0de75c6dd..68caf9a9ac 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG aa45ad4
+ GIT_TAG 9cb965f
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/docs/en/01-index.md b/docs/en/01-index.md
index 22e62bc5e0..5265be42f8 100644
--- a/docs/en/01-index.md
+++ b/docs/en/01-index.md
@@ -4,25 +4,24 @@ sidebar_label: Documentation Home
slug: /
---
-
-TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.
+TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic as well as novel concepts in TDengine, and talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.
To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
-TDengine greatly improves the efficiency of data ingestion, querying and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [“Concepts”](./concept) thoroughly.
+TDengine greatly improves the efficiency of data ingestion, querying, and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [Concepts](./concept) thoroughly.
-If you are a developer, please read the [“Developer Guide”](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work.
+If you are a developer, please read the [Developer Guide](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work.
-We live in the era of big data, and scale-up is unable to meet the growing needs of business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to ["cluster deployment"](../deployment).
+We live in the era of big data, and scale-up is unable to meet the growing needs of businesses. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but it also decided to open source this important feature. To learn how to deploy, manage, and maintain a TDengine cluster, please refer to [Cluster Deployment](../deployment).
-TDengine uses ubiquitious SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll up, interpolation and time weighted average, among many others. The ["SQL Reference"](./taos-sql) chapter describes the SQL syntax in detail, and lists the various supported commands and functions.
+TDengine uses ubiquitous SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll-up, interpolation, and time-weighted average, among many others. The [SQL Reference](./taos-sql) chapter describes the SQL syntax in detail and lists the various supported commands and functions.
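+
+For example, a query using these extensions might downsample readings into 10-minute windows and interpolate gaps (a sketch, assuming the `meters` example table from the documentation):
+
+```sql
+-- Average current per 10-minute window over the last hour; gaps filled by linear interpolation.
+SELECT _wstart, AVG(current) FROM meters WHERE ts > now - 1h INTERVAL(10m) FILL(LINEAR);
+```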
-If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the ["Administration"](./operation) section.
+If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please thoroughly read the [Administration](./operation) section.
-If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the ["Reference"](./reference) chapter.
+If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the [Reference](./reference) chapter.
-If you are very interested in the internal design of TDengine, please read the chapter ["Inside TDengine”](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
+If you are very interested in the internal design of TDengine, please read the chapter [Inside TDengine](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
-TDengine is an open source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation, or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
+TDengine is an open-source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
-Together, we make a difference.
+Together, we make a difference!
diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md
index 51df831948..d385845d7c 100644
--- a/docs/en/02-intro/index.md
+++ b/docs/en/02-intro/index.md
@@ -12,34 +12,34 @@ This section introduces the major features, competitive advantages, typical use-
The major features are listed below:
1. Insert data
- * supports [using SQL to insert](../develop/insert-data/sql-writing).
- * supports [schemaless writing](../reference/schemaless/) just like NoSQL databases. It also supports standard protocols like [InfluxDB LINE](../develop/insert-data/influxdb-line),[OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](../develop/insert-data/opentsdb-json) among others.
- * supports seamless integration with third-party tools like [Telegraf](../third-party/telegraf/), [Prometheus](../third-party/prometheus/), [collectd](../third-party/collectd/), [StatsD](../third-party/statsd/), [TCollector](../third-party/tcollector/) and [icinga2/](../third-party/icinga2/), they can write data into TDengine with simple configuration and without a single line of code.
+ - Supports [using SQL to insert](../develop/insert-data/sql-writing).
+ - Supports [schemaless writing](../reference/schemaless/) just like NoSQL databases. It also supports standard protocols like [InfluxDB Line](../develop/insert-data/influxdb-line), [OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON](../develop/insert-data/opentsdb-json), among others.
+ - Supports seamless integration with third-party tools like [Telegraf](../third-party/telegraf/), [Prometheus](../third-party/prometheus/), [collectd](../third-party/collectd/), [StatsD](../third-party/statsd/), [TCollector](../third-party/tcollector/), [EMQX](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker), and [Icinga2](../third-party/icinga2/). These tools can write data into TDengine with simple configuration and without a single line of code.
2. Query data
- * supports standard [SQL](../taos-sql/), including nested query.
- * supports [time series specific functions](../taos-sql/function/#time-series-extensions) and [time series specific queries](../taos-sql/distinguished), like downsampling, interpolation, cumulated sum, time weighted average, state window, session window and many others.
- * supports [user defined functions](../taos-sql/udf).
+ - Supports standard [SQL](../taos-sql/), including nested query.
+ - Supports [time series specific functions](../taos-sql/function/#time-series-extensions) and [time series specific queries](../taos-sql/distinguished), like downsampling, interpolation, cumulative sum, time-weighted average, state window, session window, and many others.
+ - Supports [User Defined Functions (UDF)](../taos-sql/udf).
3. [Caching](../develop/cache/): TDengine always saves the last data point in cache, so Redis is not needed for time-series data processing.
-4. [Stream Processing](../develop/stream/): not only is the continuous query is supported, but TDengine also supports even driven stream processing, so Flink or spark is not needed for time-series daata processing.
-5. [Data Dubscription](../develop/tmq/): application can subscribe a table or a set of tables. API is the same as Kafka, but you can specify filter conditions.
+4. [Stream Processing](../develop/stream/): Not only is continuous query supported, but TDengine also supports event-driven stream processing, so Flink or Spark is not needed for time-series data processing.
+5. [Data Subscription](../develop/tmq/): Applications can subscribe to a table or a set of tables. The API is the same as Kafka's, but you can specify filter conditions.
6. Visualization
- * supports seamless integration with [Grafana](../third-party/grafana/) for visualization.
- * supports seamless integration with Google Data Studio.
+ - Supports seamless integration with [Grafana](../third-party/grafana/) for visualization.
+ - Supports seamless integration with Google Data Studio.
7. Cluster
- * supports [cluster](../deployment/) with the capability of increasing processing power by adding more nodes.
- * supports [deployment on Kubernetes](../deployment/k8s/)
- * supports high availability via data replication.
+ - Supports [cluster](../deployment/) with the capability of increasing processing power by adding more nodes.
+ - Supports [deployment on Kubernetes](../deployment/k8s/).
+ - Supports high availability via data replication.
8. Administration
- * provides [monitoring](../operation/monitor) on running instances of TDengine.
- * provides many ways to [import](../operation/import) and [export](../operation/export) data.
+ - Provides [monitoring](../operation/monitor) on running instances of TDengine.
+ - Provides many ways to [import](../operation/import) and [export](../operation/export) data.
9. Tools
- * provides an interactive [command-line interface](../reference/taos-shell) for management, maintenance and ad-hoc queries.
- * provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine.
+ - Provides an interactive [Command-line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries.
+ - Provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine.
10. Programming
- * provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
- * provides a [REST API](../reference/rest-api/).
+ - Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
+ - Provides a [REST API](../reference/rest-api/).
-For more details on features, please read through the entire documentation.
+For more details on features, please read through the entire documentation.
## Competitive Advantages
@@ -49,23 +49,31 @@ By making full use of [characteristics of time series data](https://tdengine.com
- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
-- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
+- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment and full observability, TDengine is a cloud-native time-series database and can be deployed on public, private or hybrid clouds.
-- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to[
-](https://tdengine.com/tdengine/easy-time-series-data-platform/) deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
+- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third-party tools. For data users, it gives easy data access.
-- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
+- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
-With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly;2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly;3: With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
+With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
+
+1. With its superior performance, the computing and storage resources are reduced significantly.
+2. With SQL support, it can be seamlessly integrated with many third-party tools, and learning costs/migration costs are reduced significantly.
+3. With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
## Technical Ecosystem
+
This is how TDengine would be situated, in a typical time-series data processing platform:
+
+

-<center>Figure 1. TDengine Technical Ecosystem</center>
+<center>Figure 1. TDengine Technical Ecosystem</center>
+
On the left-hand side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right-hand side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides an interactive command-line interface and a web interface for management and maintenance.
@@ -75,42 +83,42 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
### Characteristics and Requirements of Data Sources
-| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| -------------------------------------------------------- | ------------------ | ----------------------- | ------------------- | :----------------------------------------------------------- |
-| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry.|
-| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
-| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |
+| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------ | ------------------ | ----------------------- | ------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry. |
+| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
+| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |
### System Architecture Requirements
-| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
+| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ----------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| A simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions. There is no need to integrate any additional third-party products. |
-| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
-| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. |
+| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
+| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. |
### System Function Requirements
-| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level.|
-| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. |
+| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| -------------------------------------------- | ------------------ | ----------------------- | ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level. |
+| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. |
### System Performance Requirements
-| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
-| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products.|
-| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
+| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------- |
+| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
+| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
+| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
### System Maintenance Requirements
-| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
-| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs.|
-| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine.|
+| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| --------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
+| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs. |
+| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine. |
## Comparison with other databases
diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md
index 901fe69d24..61eb8f04eb 100644
--- a/docs/en/07-develop/01-connect/index.md
+++ b/docs/en/07-develop/01-connect/index.md
@@ -1,6 +1,7 @@
---
-title: Connect
-description: "This document explains how to establish connections to TDengine and how to install and use TDengine connectors."
+sidebar_label: Connect
+title: Connect to TDengine
+description: "How to establish connections to TDengine and how to install and use TDengine connectors."
---
import Tabs from "@theme/Tabs";
diff --git a/docs/en/07-develop/03-insert-data/05-high-volume.md b/docs/en/07-develop/03-insert-data/05-high-volume.md
index 8163ae03b2..9ea0c88447 100644
--- a/docs/en/07-develop/03-insert-data/05-high-volume.md
+++ b/docs/en/07-develop/03-insert-data/05-high-volume.md
@@ -16,7 +16,7 @@ To achieve high performance writing, there are a few aspects to consider. In the
From the perspective of application program, you need to consider:
-1. The data size of each single write, also known as batch size. Generally speaking, higher batch size generates better writing performance. However, once the batch size is over a specific value, you will not get any additional benefit anymore. When using SQL to write into TDengine, it's better to put as much as possible data in single SQL. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB. It can be configured by parameter `maxSQLLength` on client side, and the default value is 65,480.
+1. The data size of each single write, also known as batch size. Generally speaking, a higher batch size generates better writing performance. However, once the batch size is over a specific value, you will not get any additional benefit. When using SQL to write into TDengine, it's better to put as much data as possible in a single SQL statement, as shown in the sketch below. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB.
2. The number of concurrent connections. Normally more connections can get better result. However, once the number of connections exceeds the processing ability of the server side, the performance may downgrade.
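+
+For example, a single INSERT statement can batch multiple rows and even multiple tables (a minimal sketch, assuming the `meters` example schema; the table names and values are illustrative):
+
+```sql
+INSERT INTO d1001 VALUES (now, 10.3, 219, 0.31) (now+1s, 10.2, 220, 0.32)
+            d1002 VALUES (now, 10.5, 218, 0.29);
+```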
@@ -46,12 +46,9 @@ If the data source is Kafka, then the appication program is a consumer of Kafka,
### Tune TDengine
-TDengine is a distributed and high performance time series database, there are also some ways to tune TDengine to get better writing performance.
+On the server side, the database configuration parameter `vgroups` needs to be set carefully to maximize system performance. If it's set too low, the system capability can't be fully utilized; if it's set too high, unnecessary resource contention may occur. A general recommendation for the `vgroups` parameter is twice the number of CPU cores. However, depending on the actual system resources, it may still need to be tuned.
-1. Set proper number of `vgroups` according to available CPU cores. Normally, we recommend 2 \* number_of_cores as a starting point. If the verification result shows this is not enough to utilize CPU resources, you can use a higher value.
-2. Set proper `minTablesPerVnode`, `tableIncStepPerVnode`, and `maxVgroupsPerDb` according to the number of tables so that tables are distributed even across vgroups. The purpose is to balance the workload among all vnodes so that system resources can be utilized better to get higher performance.
-
-For more performance tuning parameters, please refer to [Configuration Parameters](../../../reference/config).
+For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config).
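+
+For example, `vgroups` can be set when a database is created (a minimal sketch; the database name and value are illustrative, assuming a 16-core host):
+
+```sql
+-- 2 x 16 CPU cores as a starting point; adjust after benchmarking.
+CREATE DATABASE db1 VGROUPS 32;
+```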
## Sample Programs
@@ -359,7 +356,7 @@ Writing process tries to read as much as possible data from message queue and wr
-SQLWriter class encapsulates the logic of composing SQL and writing data. Please be noted that the tables have not been created before writing, but are created automatically when catching the exception of table doesn't exist. For other exceptions caught, the SQL which caused the exception are logged for you to debug. This class also checks the SQL length, if the SQL length is closed to `maxSQLLength` the SQL will be executed immediately. To improve writing efficiency, it's better to increase `maxSQLLength` properly.
+The SQLWriter class encapsulates the logic of composing SQL and writing data. Please note that the tables have not been created before writing; they are created automatically when a "table does not exist" exception is caught. For other exceptions caught, the SQL statement which caused the exception is logged for you to debug. This class also checks the SQL length, and the maximum SQL length is passed in via the parameter maxSQLLength according to the actual TDengine limit.
SQLWriter
diff --git a/docs/en/12-taos-sql/14-stream.md b/docs/en/12-taos-sql/14-stream.md
index fcd7876510..deb2522309 100644
--- a/docs/en/12-taos-sql/14-stream.md
+++ b/docs/en/12-taos-sql/14-stream.md
@@ -50,7 +50,7 @@ SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVA
## Delete a Stream
```sql
-DROP STREAM [IF NOT EXISTS] stream_name
+DROP STREAM [IF EXISTS] stream_name
```
This statement deletes the stream processing service only. The data generated by the stream is retained.
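+
+For example (the stream name is illustrative):
+
+```sql
+DROP STREAM IF EXISTS power_stream;
+```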
diff --git a/docs/en/12-taos-sql/22-meta.md b/docs/en/12-taos-sql/22-meta.md
index 796b25dcb0..9bda5a0a10 100644
--- a/docs/en/12-taos-sql/22-meta.md
+++ b/docs/en/12-taos-sql/22-meta.md
@@ -245,3 +245,35 @@ Provides dnode configuration information.
| 1 | dnode_id | INT | Dnode ID |
| 2 | name | BINARY(32) | Parameter |
| 3 | value | BINARY(64) | Value |
+
+## INS_TOPICS
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :---------: | ------------ | ------------------------------ |
+| 1 | topic_name | BINARY(192) | Topic name |
+| 2 | db_name | BINARY(64) | Database for the topic |
+| 3 | create_time | TIMESTAMP | Creation time |
+| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
+
+## INS_SUBSCRIPTIONS
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :------------: | ------------ | ------------------------ |
+| 1 | topic_name | BINARY(204) | Subscribed topic |
+| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
+| 3 | vgroup_id | INT | Vgroup ID for the consumer |
+| 4 | consumer_id | BIGINT | Consumer ID |
+
+## INS_STREAMS
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :----------: | ------------ | --------------------------------------- |
+| 1 | stream_name | BINARY(64) | Stream name |
+| 2 | create_time | TIMESTAMP | Creation time |
+| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
+| 4 | status | BINARY(20) | Current status |
+| 5 | source_db | BINARY(64) | Source database |
+| 6 | target_db | BINARY(64) | Target database |
+| 7 | target_table | BINARY(192) | Target table |
+| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
+| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |
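+
+These tables can be queried like any other system table. For example (a minimal sketch):
+
+```sql
+SELECT topic_name, db_name, create_time FROM information_schema.ins_topics;
+```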
diff --git a/docs/en/12-taos-sql/23-perf.md b/docs/en/12-taos-sql/23-perf.md
index 10a9338022..29cf3af6ab 100644
--- a/docs/en/12-taos-sql/23-perf.md
+++ b/docs/en/12-taos-sql/23-perf.md
@@ -61,15 +61,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 12 | sub_status | BINARY(1000) | Subquery status |
| 13 | sql | BINARY(1024) | SQL statement |
-## PERF_TOPICS
-
-| # | **Column** | **Data Type** | **Description** |
-| --- | :---------: | ------------ | ------------------------------ |
-| 1 | topic_name | BINARY(192) | Topic name |
-| 2 | db_name | BINARY(64) | Database for the topic |
-| 3 | create_time | TIMESTAMP | Creation time |
-| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
-
## PERF_CONSUMERS
| # | **Column** | **Data Type** | **Description** |
@@ -83,15 +74,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 7 | subscribe_time | TIMESTAMP | Time of first subscription |
| 8 | rebalance_time | TIMESTAMP | Time of first rebalance triggering |
-## PERF_SUBSCRIPTIONS
-
-| # | **Column** | **Data Type** | **Description** |
-| --- | :------------: | ------------ | ------------------------ |
-| 1 | topic_name | BINARY(204) | Subscribed topic |
-| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
-| 3 | vgroup_id | INT | Vgroup ID for the consumer |
-| 4 | consumer_id | BIGINT | Consumer ID |
-
## PERF_TRANS
| # | **Column** | **Data Type** | **Description** |
@@ -113,17 +95,3 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | stable_name | BINARY(192) | Supertable name |
| 4 | vgroup_id | INT | Dedicated vgroup name |
-
-## PERF_STREAMS
-
-| # | **Column** | **Data Type** | **Description** |
-| --- | :----------: | ------------ | --------------------------------------- |
-| 1 | stream_name | BINARY(64) | Stream name |
-| 2 | create_time | TIMESTAMP | Creation time |
-| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
-| 4 | status | BIANRY(20) | Current status |
-| 5 | source_db | BINARY(64) | Source database |
-| 6 | target_db | BIANRY(64) | Target database |
-| 7 | target_table | BINARY(192) | Target table |
-| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
-| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |
diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md
index 37438ee780..b9a3fa2321 100644
--- a/docs/en/12-taos-sql/25-grant.md
+++ b/docs/en/12-taos-sql/25-grant.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Permissions Management
-title: Permissions Management
+sidebar_label: Access Control
+title: User and Access Control
+description: Manage users and user permissions
---
This document describes how to manage permissions in TDengine.
diff --git a/docs/en/14-reference/03-connector/05-go.mdx b/docs/en/14-reference/03-connector/05-go.mdx
index 00e3bc1bc3..518d3625d5 100644
--- a/docs/en/14-reference/03-connector/05-go.mdx
+++ b/docs/en/14-reference/03-connector/05-go.mdx
@@ -7,7 +7,7 @@ title: TDengine Go Connector
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-import Preparition from "./_preparition.mdx"
+import Preparition from "./_preparation.mdx"
import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx"
import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx"
import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx"
diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx
index 1184c98a28..0d391c6ac3 100644
--- a/docs/en/14-reference/03-connector/06-rust.mdx
+++ b/docs/en/14-reference/03-connector/06-rust.mdx
@@ -7,7 +7,7 @@ title: TDengine Rust Connector
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-import Preparition from "./_preparition.mdx"
+import Preparition from "./_preparation.mdx"
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx
index fc95033baa..d92a93fd4f 100644
--- a/docs/en/14-reference/03-connector/07-python.mdx
+++ b/docs/en/14-reference/03-connector/07-python.mdx
@@ -7,7 +7,7 @@ description: "taospy is the official Python connector for TDengine. taospy provi
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-`taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
+`taospy` is the official Python connector for TDengine. It provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".
diff --git a/docs/en/14-reference/03-connector/08-node.mdx b/docs/en/14-reference/03-connector/08-node.mdx
index f93632b417..bf7c6b95ea 100644
--- a/docs/en/14-reference/03-connector/08-node.mdx
+++ b/docs/en/14-reference/03-connector/08-node.mdx
@@ -7,7 +7,7 @@ title: TDengine Node.js Connector
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-import Preparition from "./_preparition.mdx";
+import Preparition from "./_preparation.mdx";
import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx";
import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx";
import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx";
diff --git a/docs/en/14-reference/03-connector/09-csharp.mdx b/docs/en/14-reference/03-connector/09-csharp.mdx
index 823e907599..bc16cd086b 100644
--- a/docs/en/14-reference/03-connector/09-csharp.mdx
+++ b/docs/en/14-reference/03-connector/09-csharp.mdx
@@ -7,7 +7,7 @@ title: C# Connector
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-import Preparition from "./_preparition.mdx"
+import Preparition from "./_preparation.mdx"
import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx"
import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx"
import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx"
diff --git a/docs/en/14-reference/03-connector/_preparition.mdx b/docs/en/14-reference/03-connector/_preparition.mdx
deleted file mode 100644
index 87538ebfd8..0000000000
--- a/docs/en/14-reference/03-connector/_preparition.mdx
+++ /dev/null
@@ -1,10 +0,0 @@
-- 已安装客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装)
-
-:::info
-
-由于 TDengine 的客户端驱动使用 C 语言编写,使用原生连接时需要加载系统对应安装在本地的客户端驱动共享库文件,通常包含在 TDengine 安装包。TDengine Linux 服务端安装包附带了 TDengine 客户端,也可以单独安装 [Linux 客户端](/get-started/) 。在 Windows 环境开发时需要安装 TDengine 对应的 [Windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) 。
-
-- libtaos.so: 在 Linux 系统中成功安装 TDengine 后,依赖的 Linux 版客户端驱动 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
-- taos.dll: 在 Windows 系统中安装完客户端之后,依赖的 Windows 版客户端驱动 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
-
-:::
diff --git a/docs/en/14-reference/14-taosKeeper.md b/docs/en/14-reference/14-taosKeeper.md
index 476b5a1fd2..665bc75380 100644
--- a/docs/en/14-reference/14-taosKeeper.md
+++ b/docs/en/14-reference/14-taosKeeper.md
@@ -1,7 +1,7 @@
---
sidebar_label: taosKeeper
title: taosKeeper
-description: Instructions and tips for using taosKeeper
+description: taosKeeper exports TDengine monitoring metrics.
---
## Introduction
@@ -22,26 +22,35 @@ You can compile taosKeeper separately and install it. Please refer to the [taosK
### Configuration and running methods
-
-taosKeeper needs to be executed on the terminal of the operating system. To run taosKeeper, see [configuration file](#configuration-file-parameters-in-detail).
+taosKeeper needs to be executed from a terminal on the operating system. It supports three configuration methods: [command-line arguments](#command-line-arguments-in-detail), [environment variables](#environment-variable-in-detail), and a [configuration file](#configuration-file-parameters-in-detail), listed here in decreasing order of precedence.
-**Make sure that the TDengine cluster is running correctly before running taosKeeper. ** Ensure that the monitoring service in TDengine has been started. For more information, see [TDengine Monitoring Configuration](../config/#monitoring).
+**Make sure that the TDengine cluster is running correctly before running taosKeeper.** Ensure that the monitoring service in TDengine has been started. For more information, see [TDengine Monitoring Configuration](../config/#monitoring).
-
+### Environment variable
+
+You can use environment variables to run taosKeeper and control its behavior:
+
+```shell
+$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
+
+$ taoskeeper
+```
+
+You can run `taoskeeper -h` for more details.
+
### Configuration File
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used.
```shell
-taoskeeper -c
+$ taoskeeper -c
```
**Sample configuration files**
@@ -110,7 +119,7 @@ Query OK, 1 rows in database (0.036162s)
#### Export Monitoring Metrics
```shell
-curl http://127.0.0.1:6043/metrics
+$ curl http://127.0.0.1:6043/metrics
```
Sample result set (excerpt):
diff --git a/docs/en/20-third-party/12-google-data-studio.md b/docs/en/20-third-party/12-google-data-studio.md
new file mode 100644
index 0000000000..fc94f98056
--- /dev/null
+++ b/docs/en/20-third-party/12-google-data-studio.md
@@ -0,0 +1,36 @@
+---
+sidebar_label: Google Data Studio
+title: Use Google Data Studio to access TDengine
+---
+
+Data Studio is a powerful tool for reporting and visualization, offering a wide variety of charts and connectors and making it easy to generate reports based on predefined templates. Its ease of use and robust ecosystem have made it one of the first choices for people working in data analysis.
+
+TDengine is a high-performance, scalable time-series database that supports SQL. Many businesses and developers in fields spanning from IoT and the Industrial Internet to IT and finance are using TDengine as their time-series database management solution.
+
+The TDengine team immediately saw the benefits of using TDengine to process time-series data and Data Studio to analyze it, and they got to work to create a connector for Data Studio.
+
+With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”.
+
+
+
+Select the TDengine connector and click Authorize.
+
+
+
+Then sign in to your Google Account and click Allow to enable the connection to TDengine.
+
+
+
+In the Enter URL field, type the hostname and port of the server running the TDengine REST service. In the following fields, type your username, password, database name, table name, and the start and end times of your query range. Then, click Connect.
+
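+If you are unsure whether the REST service is reachable, you can test it from the command line first (a sketch: taosAdapter serves the REST API on port 6041 by default; the hostname and default credentials shown are illustrative):
+
+```shell
+curl -L -u root:taosdata -d "select server_version();" http://tdengine.example.com:6041/rest/sql
+```
+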
+
+
+After the connection is established, you can use Data Studio to process your data and create reports.
+
+
+
+In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data – some examples are shown below.
+
+
+
+With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we’re sure you’ll be able to gain new insights and obtain even more value from your data.
diff --git a/docs/en/20-third-party/gds/gds-01.webp b/docs/en/20-third-party/gds/gds-01.webp
new file mode 100644
index 0000000000..2e5f9e4ff5
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-01.webp differ
diff --git a/docs/en/20-third-party/gds/gds-02.png.webp b/docs/en/20-third-party/gds/gds-02.png.webp
new file mode 100644
index 0000000000..3b3537f5a4
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-02.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-03.png.webp b/docs/en/20-third-party/gds/gds-03.png.webp
new file mode 100644
index 0000000000..5719436d5b
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-03.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-04.png.webp b/docs/en/20-third-party/gds/gds-04.png.webp
new file mode 100644
index 0000000000..ddaae5c1a6
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-04.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-05.png.webp b/docs/en/20-third-party/gds/gds-05.png.webp
new file mode 100644
index 0000000000..9a917678fc
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-05.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-06.png.webp b/docs/en/20-third-party/gds/gds-06.png.webp
new file mode 100644
index 0000000000..c76b68d32b
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-06.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-07.png.webp b/docs/en/20-third-party/gds/gds-07.png.webp
new file mode 100644
index 0000000000..1386ae9c4d
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-07.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-08.png.webp b/docs/en/20-third-party/gds/gds-08.png.webp
new file mode 100644
index 0000000000..59dcf8b31d
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-08.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-09.png.webp b/docs/en/20-third-party/gds/gds-09.png.webp
new file mode 100644
index 0000000000..b94439f211
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-09.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-10.png.webp b/docs/en/20-third-party/gds/gds-10.png.webp
new file mode 100644
index 0000000000..a63cad9e9a
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-10.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-11.png.webp b/docs/en/20-third-party/gds/gds-11.png.webp
new file mode 100644
index 0000000000..fc38cd9a29
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-11.png.webp differ
diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md
index 79d5424ac2..f9127121f3 100644
--- a/docs/zh/01-index.md
+++ b/docs/zh/01-index.md
@@ -4,22 +4,22 @@ sidebar_label: 文档首页
slug: /
---
-TDengine是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库(Time Series Database, TSDB), 它专为物联网、工业互联网、金融等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。
+TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库(Time Series Database, TSDB), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。
-TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用TDengine, 无论如何,请您仔细阅读[基本概念](./concept)一章。
+TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论如何,请您仔细阅读[基本概念](./concept)一章。
-如果你是开发者,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要把示例代码拷贝粘贴,针对自己的应用稍作改动,就能跑起来。
+如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。
-我们已经生活在大数据的时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请参考[部署集群](./deployment)一章。
+我们已经生活在大数据时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 Database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请仔细参考[部署集群](./deployment)一章。
-TDengine 采用 SQL 作为其查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。
+TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。
-如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出,配置参数,怎么监测 TDengine 是否健康运行,怎么提升系统运行的性能,那么请仔细参考[运维指南](./operation)一章。
+如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。
-如果你对 TDengine 外围工具,REST API, 各种编程语言的连接器想做更多详细了解,请看[参考指南](./reference)一章。
+如果你对 TDengine 的外围工具、REST API、各种编程语言的连接器(Connector)想做更多详细了解,请看[参考指南](./reference)一章。
-如果你对 TDengine 内部的架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。
+如果你对 TDengine 的内部架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。
-最后,作为一个开源软件,欢迎大家的参与。如果发现文档的任何错误,描述不清晰的地方,都请在每个页面的最下方,点击“编辑本文档“直接进行修改。
+最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。
Together, we make a difference!
diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md
index 152d03d9e3..9a0a6fb547 100644
--- a/docs/zh/02-intro.md
+++ b/docs/zh/02-intro.md
@@ -4,53 +4,53 @@ description: 简要介绍 TDengine 的主要功能
toc_max_heading_level: 2
---
-TDengine 是一款开源、高性能、云原生的[时序数据库](https://tdengine.com/tsdb/),且针对物联网、车联网以及工业互联网进行了优化。TDengine 的代码,包括其集群功能,都在 GNU AGPL v3.0 下开源。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等其它功能以降低系统复杂度及研发和运维成本。
+TDengine 是一款开源、高性能、云原生的[时序数据库](https://tdengine.com/tsdb/),且针对物联网、车联网、工业互联网、金融、IT 运维等场景进行了优化。TDengine 的代码,包括集群功能,都在 GNU AGPL v3.0 下开源。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等其它功能以降低系统复杂度及研发和运维成本。
-本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对TDengine有个整体的了解。
+本章节介绍 TDengine 的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine 有个整体的了解。
## 主要功能
-TDengine的主要功能如下:
+TDengine 的主要功能如下:
1. 写入数据,支持
- - [SQL 写入](../develop/insert-data/sql-writing)
- - [Schemaless 写入](../reference/schemaless/),支持多种标准写入协议
- - [InfluxDB LINE 协议](../develop/insert-data/influxdb-line)
- - [OpenTSDB Telnet 协议](../develop/insert-data/opentsdb-telnet)
- - [OpenTSDB JSON 协议](../develop/insert-data/opentsdb-json)
- - 与多种第三方工具的无缝集成,它们都可以仅通过配置而无需任何代码即可将数据写入 TDengine
- - [Telegraf](../third-party/telegraf)
- - [Prometheus](../third-party/prometheus)
- - [StatsD](../third-party/statsd)
- - [collectd](../third-party/collectd)
- - [icinga2](../third-party/icinga2)
- - [TCollector](../third-party/tcollector)
- - [EMQ](../third-party/emq-broker)
- - [HiveMQ](../third-party/hive-mq-broker) ;
+ - [SQL 写入](../develop/insert-data/sql-writing)
+ - [无模式(Schemaless)写入](../reference/schemaless/),支持多种标准写入协议
+ - [InfluxDB Line 协议](../develop/insert-data/influxdb-line)
+ - [OpenTSDB Telnet 协议](../develop/insert-data/opentsdb-telnet)
+ - [OpenTSDB JSON 协议](../develop/insert-data/opentsdb-json)
+ - 与多种第三方工具的无缝集成,它们都可以仅通过配置而无需任何代码即可将数据写入 TDengine
+ - [Telegraf](../third-party/telegraf)
+ - [Prometheus](../third-party/prometheus)
+ - [StatsD](../third-party/statsd)
+ - [collectd](../third-party/collectd)
+ - [Icinga2](../third-party/icinga2)
+ - [TCollector](../third-party/tcollector)
+ - [EMQX](../third-party/emq-broker)
+ - [HiveMQ](../third-party/hive-mq-broker)
2. 查询数据,支持
- - [标准SQL](../taos-sql),含嵌套查询
- - [时序数据特色函数](../taos-sql/function/#time-series-extensions)
- - [时序顺序特色查询](../taos-sql/distinguished),例如降采样、插值、累加和、时间加权平均、状态窗口、会话窗口等
- - [用户自定义函数](../taos-sql/udf)
+ - [标准 SQL](../taos-sql),含嵌套查询
+ - [时序数据特色函数](../taos-sql/function/#time-series-extensions)
+ - [时序数据特色查询](../taos-sql/distinguished),例如降采样、插值、累加和、时间加权平均、状态窗口、会话窗口等
+ - [用户自定义函数(UDF)](../taos-sql/udf)
3. [缓存](../develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis 就能对时序数据进行高效处理
-4. [流式计算](../develop/stream)(Stream Processing),TDengine 不仅支持连续查询,还支持事件驱动的流式计算,这样在处理时序数据时就无需 Flink 或 Spark 这样流计算组件
-5. [数据订阅](../develop/tmq),应用程序可以订阅一张表或一组表的数据,API 与 Kafka 相同,而且可以指定过滤条件
+4. [流式计算(Stream Processing)](../develop/stream),TDengine 不仅支持连续查询,还支持事件驱动的流式计算,这样在处理时序数据时就无需 Flink 或 Spark 这样流式计算组件
+5. [数据订阅](../develop/tmq),应用程序可以订阅一张表或一组表的数据,提供与 Kafka 相同的 API,而且可以指定过滤条件
6. 可视化
- - 支持与 [Grafana](../third-party/grafana/) 的无缝集成
- - 支持与 Google Data Studio 的无缝集成
+ - 支持与 [Grafana](../third-party/grafana/) 的无缝集成
+ - 支持与 Google Data Studio 的无缝集成
7. 集群
- - 集群部署(../deployment/),可以通过增加节点进行水平扩展以提升处理能力
- - 可以通过 [Kubernets 部署 TDengine](../deployment/k8s/)
- - 通过多副本提供高可用能力
+ - [集群部署](../deployment/),可以通过增加节点进行水平扩展以提升处理能力
+ - 可以通过 [Kubernetes 部署 TDengine](../deployment/k8s/)
+ - 通过多副本提供高可用能力
8. 管理
- - [监控](../operation/monitor)运行中的 TDengine 实例
- - 多种[数据导入](../operation/import)方式
- - 多种[数据导出](../operation/export)方式
+ - [监控](../operation/monitor)运行中的 TDengine 实例
+ - 多种[数据导入](../operation/import)方式
+ - 多种[数据导出](../operation/export)方式
9. 工具
- - 提供交互式[命令行程序](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
- - 提供压力测试工具[taosBenchmark](../reference/taosbenchmark),用于测试 TDengine 的性能
+ - 提供[交互式命令行程序(CLI)](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
+ - 提供压力测试工具[taosBenchmark](../reference/taosbenchmark),用于测试 TDengine 的性能
10. 编程
- - 提供各种语言的[连接器](../connector): 如 [C/C++](../connector/cpp), [Java](../connector/java), [Go](../connector/go), [Node.JS](../connector/node), [Rust](../connector/rust), [Python](../connector/python), [C#](../connector/csharp) 等
+ - 提供各种语言的[连接器(Connector)](../connector): 如 [C/C++](../connector/cpp)、[Java](../connector/java)、[Go](../connector/go)、[Node.js](../connector/node)、[Rust](../connector/rust)、[Python](../connector/python)、[C#](../connector/csharp) 等
- 支持 [REST 接口](../connector/rest-api/)
更多细节功能,请阅读整个文档。
@@ -63,36 +63,36 @@ TDengine的主要功能如下:
- **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**:TDengine 内建缓存、流式计算和数据订阅等功能,为时序数据的处理提供了极简的解决方案,从而大幅降低了业务系统的设计复杂度和运维成本。
-- **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**:通过原生的分布式设计、数据分片和分区、存算分离、RAFT 协议、Kubernets 部署和完整的可观测性,TDengine 是一款云原生时序数据库并且能够部署在公有云、私有云和混合云上。
+- **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**:通过原生的分布式设计、数据分片和分区、存算分离、RAFT 协议、Kubernetes 部署和完整的可观测性,TDengine 是一款云原生时序数据库并且能够部署在公有云、私有云和混合云上。
-- **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**:对系统管理员来说,TDengine 大幅降低了管理和维护的代价。对开发者来说, TDengine 提供了简单的接口、极简的解决方案和与第三方工具的无缝集成。对数据分析专家来说,TDengine 提供了便捷的数据访问。
+- **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**:对系统管理员来说,TDengine 大幅降低了管理和维护的代价。对开发者来说, TDengine 提供了简单的接口、极简的解决方案和与第三方工具的无缝集成。对数据分析专家来说,TDengine 提供了便捷的数据访问能力。
- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:通过超级表、存储计算分离、分区分片、预计算和其它技术,TDengine 能够高效地浏览、格式化和访问数据。
-- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部在开源协议下公开。全球超过 140k 个运行实例,GitHub Star 19k,且拥有一个活跃的开发者社区。
+- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部在开源协议下公开。全球超过 140k 个运行实例,GitHub Star 19k,且拥有一个活跃的开发者社区。
采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面:
-1. 由于其超强性能,它能将系统需要的计算资源和存储资源大幅降低
+1. 由于其超强性能,它能将系统所需的计算资源和存储资源大幅降低
2. 因为支持 SQL,能与众多第三方软件无缝集成,学习迁移成本大幅下降
-3. 因为是一极简的时序数据平台,系统复杂度、研发和运营成本大幅降低
+3. 因为是一款极简的时序数据平台,系统复杂度、研发和运营成本大幅降低
## 技术生态
-在整个时序大数据平台中,TDengine 在其中扮演的角色如下:
+在整个时序大数据平台中,TDengine 扮演的角色如下:

+
+图 1. TDengine 技术生态图
-
-图 1. TDengine技术生态图
-上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka, 他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序 (CLI) 以及可视化管理管理。
+上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf,也包括 Kafka,它们的数据将被源源不断地写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序(CLI)以及可视化管理工具。
## 典型适用场景
-作为一个高性能、分布式、支持 SQL 的时序数据库 (Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。本文对适用场景做更多详细的分析。
+作为一个高性能、分布式、支持 SQL 的时序数据库(Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因其充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。
### 数据源特点和需求
@@ -114,18 +114,18 @@ TDengine的主要功能如下:
### 系统功能需求
-| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
-| -------------------------- | ------ | -------- | -------- | --------------------------------------------------------------------------------------------------------------------- |
-| 要求完整的内置数据处理算法 | | √ | | TDengine 的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。 |
-| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据系统处理,或者应该考虑 TDengine 和关系型数据系统配合实现系统功能。 |
+| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| -------------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------------------------- |
+| 要求完整的内置数据处理算法 | | √ | | TDengine 实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有需求,因此特殊类型的处理需求还需要在应用层面解决。 |
+| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据库处理,或者应该考虑 TDengine 和关系型数据库配合实现系统功能。 |
### 系统性能需求
-| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
-| ---------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------ |
-| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 |
-| 要求高速处理数据 | | | √ | TDengine 的专门为 IoT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 |
-| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 |
+| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| ---------------------- | ------ | -------- | -------- | -------------------------------------------------------------------------------------------------- |
+| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 |
+| 要求高速处理数据 | | | √ | TDengine 专门为 IoT 优化的存储和数据处理设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 |
+| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 |
### 系统维护需求
diff --git a/docs/zh/07-develop/03-insert-data/05-high-volume.md b/docs/zh/07-develop/03-insert-data/05-high-volume.md
index 32be8cb890..d7581467ae 100644
--- a/docs/zh/07-develop/03-insert-data/05-high-volume.md
+++ b/docs/zh/07-develop/03-insert-data/05-high-volume.md
@@ -11,7 +11,7 @@ import TabItem from "@theme/TabItem";
从客户端程序的角度来说,高效写入数据要考虑以下几个因素:
-1. 单次写入的数据量。一般来讲,每批次写入的数据量越大越高效(但超过一定阈值其优势会消失)。使用 SQL 写入 TDengine 时,尽量在一条 SQL 中拼接更多数据。目前,TDengine 支持的一条 SQL 的最大长度为 1,048,576(1M)个字符。可通过配置客户端参数 maxSQLLength(默认值为 65480)进行修改。
+1. 单次写入的数据量。一般来讲,每批次写入的数据量越大越高效(但超过一定阈值其优势会消失)。使用 SQL 写入 TDengine 时,尽量在一条 SQL 中拼接更多数据。目前,TDengine 支持的一条 SQL 的最大长度为 1,048,576(1M)个字符(见下文的 C 语言拼接写入示例)。
2. 并发连接数。一般来讲,同时写入数据的并发连接数越多写入越高效(但超过一定阈值反而会下降,取决于服务端处理能力)。
3. 数据在不同表(或子表)之间的分布,即要写入数据的相邻性。一般来说,每批次只向同一张表(或子表)写入数据比向多张表(或子表)写入数据要更高效;
4. 写入方式。一般来讲:
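To make point 1 above concrete, here is a minimal sketch with the C client, assuming a running server on localhost and an existing subtable `d1001` with the documentation's `meters` schema; the batch size and values are illustrative only:

```c
#include <stdio.h>
#include <taos.h>

// Sketch: pack many rows into one INSERT to amortize per-request overhead.
int main() {
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (conn == NULL) return 1;

  char sql[65536];
  int  len = snprintf(sql, sizeof(sql), "INSERT INTO d1001 VALUES");
  for (int i = 0; i < 100; i++) {  // 100 rows in a single statement
    len += snprintf(sql + len, sizeof(sql) - len, " (NOW + %da, %.1f, %d, %.2f)",
                    i, 10.5, 219 + i % 5, 0.31);
  }

  TAOS_RES *res = taos_query(conn, sql);
  if (taos_errno(res) != 0) fprintf(stderr, "insert failed: %s\n", taos_errstr(res));
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```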
@@ -38,13 +38,9 @@ import TabItem from "@theme/TabItem";
### 服务器配置的角度 {#setting-view}
-从服务器配置的角度来说,也有很多优化写入性能的方法。
+从服务端配置的角度,要根据系统中磁盘的数量,磁盘的 I/O 能力,以及处理器能力在创建数据库时设置适当的 vgroups 数量以充分发挥系统性能。如果 vgroups 过少,则系统性能无法发挥;如果 vgroups 过多,会造成无谓的资源竞争。常规推荐 vgroups 数量为 CPU 核数的 2 倍,但仍然要结合具体的系统资源配置进行调优。
-如果总表数不多(远小于核数乘以1000), 且无论怎么调节客户端程序,taosd 进程的 CPU 使用率都很低,那么很可能是因为表在各个 vgroup 分布不均。比如:数据库总表数是 1000 且 minTablesPerVnode 设置的也是 1000,那么所有的表都会分布在 1 个 vgroup 上。此时如果将 minTablesPerVnode 和 tablelncStepPerVnode 都设置成 100, 则可将表分布至 10 个 vgroup。(假设 maxVgroupsPerDb 大于等于 10)。
-
-如果总表数比较大(比如大于500万),适当增加 maxVgroupsPerDb 也能显著提高建表的速度。maxVgroupsPerDb 默认值为 0, 自动配置为 CPU 的核数。 如果表的数量巨大,也建议调节 maxTablesPerVnode 参数,以免超过单个 vnode 建表的上限。
-
-更多调优参数,请参考 [配置参考](../../../reference/config)部分。
+更多调优参数,请参考 [数据库管理](../../../taos-sql/database) 和 [服务端配置](../../../reference/config)。
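As a hedged illustration of the vgroups sizing advice above, the count can be fixed when the database is created; the database name `power` and the value 16 (2× an assumed 8-core host) are placeholders:

```c
#include <stdio.h>
#include <taos.h>

// Sketch: set the vgroups count at creation time
// (rule of thumb above: about 2 x CPU cores, then tune).
int create_db(TAOS *conn) {
  TAOS_RES *res  = taos_query(conn, "CREATE DATABASE IF NOT EXISTS power VGROUPS 16");
  int       code = taos_errno(res);
  if (code != 0) fprintf(stderr, "create failed: %s\n", taos_errstr(res));
  taos_free_result(res);
  return code;
}
```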
## 高效写入示例 {#sample-code}
@@ -352,7 +348,7 @@ main 函数可以接收 5 个启动参数,依次是:
-SQLWriter 类封装了拼 SQL 和写数据的逻辑。所有的表都没有提前创建,而是在发生表不存在错误的时候,再以超级表为模板批量建表,然后重新执行 INSERT 语句。对于其它错误会记录当时执行的 SQL, 以便排查错误和故障恢复。这个类也对 SQL 是否超过最大长度限制做了检查,如果接近 SQL 最大长度限制(maxSQLLength),将会立即执行 SQL。为了减少 SQL 此时,建议将 maxSQLLength 适当调大。
+SQLWriter 类封装了拼 SQL 和写数据的逻辑。所有的表都没有提前创建,而是在发生表不存在错误的时候,再以超级表为模板批量建表,然后重新执行 INSERT 语句。对于其它错误会记录当时执行的 SQL,以便排查错误和故障恢复。这个类也对 SQL 是否超过最大长度限制做了检查,支持的最大 SQL 长度由输入参数 maxSQLLength 传入,按 TDengine 3.0 的限制即 1048576。
SQLWriter
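The length check described above can be sketched as follows; `add_row` and `exec_sql` are hypothetical names for illustration, not the actual SQLWriter code, and `sql` is assumed to point at a buffer of `MAX_SQL_LEN` bytes:

```c
#include <stdio.h>
#include <string.h>

#define MAX_SQL_LEN 1048576  // TDengine 3.0 maximum SQL statement length

// Sketch: flush the pending INSERT before appending a row that no longer fits.
void add_row(char *sql, size_t *len, const char *row_values,
             void (*exec_sql)(const char *)) {
  size_t need = strlen(row_values);
  if (*len + need + 1 > MAX_SQL_LEN) {  // would exceed the cap: flush first
    exec_sql(sql);
    *len = (size_t)snprintf(sql, MAX_SQL_LEN, "INSERT INTO d1001 VALUES");
  }
  memcpy(sql + *len, row_values, need + 1);  // append incl. NUL terminator
  *len += need;
}
```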
diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md
index 9f999181c4..f936d9182d 100644
--- a/docs/zh/12-taos-sql/10-function.md
+++ b/docs/zh/12-taos-sql/10-function.md
@@ -917,7 +917,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
**返回数据类型**:同应用的字段。
-**适用数据类型**:数值类型。
+**适用数据类型**:数值类型,时间戳类型。
**适用于**:表和超级表。
@@ -932,7 +932,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
**返回数据类型**:同应用的字段。
-**适用数据类型**:数值类型。
+**适用数据类型**:数值类型,时间戳类型。
**适用于**:表和超级表。
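With timestamps now accepted by MIN/MAX, the time span of a table can be computed in one query; a sketch via the C client, with the table name illustrative:

```c
#include <stdio.h>
#include <taos.h>

// Sketch: MIN/MAX on the timestamp column yields the first and last
// record time of a table in a single query.
void time_span(TAOS *conn) {
  TAOS_RES *res = taos_query(conn, "SELECT MIN(ts), MAX(ts) FROM meters");
  if (taos_errno(res) != 0) fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  taos_free_result(res);
}
```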
diff --git a/docs/zh/12-taos-sql/14-stream.md b/docs/zh/12-taos-sql/14-stream.md
index 70b062a6ca..86437c762e 100644
--- a/docs/zh/12-taos-sql/14-stream.md
+++ b/docs/zh/12-taos-sql/14-stream.md
@@ -58,7 +58,7 @@ SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVA
## 删除流式计算
```sql
-DROP STREAM [IF NOT EXISTS] stream_name;
+DROP STREAM [IF EXISTS] stream_name;
```
仅删除流式计算任务,由流式计算写入的数据不会被删除。
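A small sketch of issuing the corrected statement from the C client; the stream name is illustrative, and `IF EXISTS` makes the call safe to repeat:

```c
#include <stdio.h>
#include <taos.h>

// Sketch: IF EXISTS makes the drop idempotent, so re-running is harmless.
void drop_stream(TAOS *conn) {
  TAOS_RES *res = taos_query(conn, "DROP STREAM IF EXISTS avg_vol_stream");
  if (taos_errno(res) != 0) fprintf(stderr, "drop failed: %s\n", taos_errstr(res));
  taos_free_result(res);
}
```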
diff --git a/docs/zh/12-taos-sql/22-meta.md b/docs/zh/12-taos-sql/22-meta.md
index e9cda45b0f..3ae444e8fe 100644
--- a/docs/zh/12-taos-sql/22-meta.md
+++ b/docs/zh/12-taos-sql/22-meta.md
@@ -246,3 +246,35 @@ Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们
| 1 | dnode_id | INT | dnode 的 ID |
| 2 | name | BINARY(32) | 配置项名称 |
| 3 | value | BINARY(64) | 该配置项的值 |
+
+## INS_TOPICS
+
+| # | **列名** | **数据类型** | **说明** |
+| --- | :---------: | ------------ | ------------------------------ |
+| 1 | topic_name | BINARY(192) | topic 名称 |
+| 2 | db_name | BINARY(64) | topic 相关的 DB |
+| 3 | create_time | TIMESTAMP | topic 的创建时间 |
+| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 |
+
+## INS_SUBSCRIPTIONS
+
+| # | **列名** | **数据类型** | **说明** |
+| --- | :------------: | ------------ | ------------------------ |
+| 1 | topic_name | BINARY(204) | 被订阅的 topic |
+| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 |
+| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
+| 4 | consumer_id | BIGINT | 消费者的唯一 id |
+
+## INS_STREAMS
+
+| # | **列名** | **数据类型** | **说明** |
+| --- | :----------: | ------------ | --------------------------------------- |
+| 1 | stream_name | BINARY(64) | 流计算名称 |
+| 2 | create_time | TIMESTAMP | 创建时间 |
+| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 |
+| 4 | status | BINARY(20) | 流当前状态 |
+| 5 | source_db | BINARY(64) | 源数据库 |
+| 6 | target_db | BINARY(64) | 目的数据库 |
+| 7 | target_table | BINARY(192) | 流计算写入的目标表 |
+| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 |
+| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 |
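These tables can be read like any others; a simplified sketch with the C client (proper BINARY length handling via `taos_fetch_lengths()` is omitted for brevity):

```c
#include <stdio.h>
#include <taos.h>

// Sketch: the topic/subscription/stream metadata now lives in information_schema.
void list_topics(TAOS *conn) {
  TAOS_RES *res = taos_query(conn,
      "SELECT topic_name, db_name FROM information_schema.ins_topics");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    TAOS_ROW row;
    while ((row = taos_fetch_row(res)) != NULL) {
      printf("topic=%s db=%s\n", (char *)row[0], (char *)row[1]);
    }
  }
  taos_free_result(res);
}
```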
diff --git a/docs/zh/12-taos-sql/23-perf.md b/docs/zh/12-taos-sql/23-perf.md
index e6ff4960a7..808d9ae31a 100644
--- a/docs/zh/12-taos-sql/23-perf.md
+++ b/docs/zh/12-taos-sql/23-perf.md
@@ -62,15 +62,6 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
| 12 | sub_status | BINARY(1000) | 子查询状态 |
| 13 | sql | BINARY(1024) | SQL 语句 |
-## PERF_TOPICS
-
-| # | **列名** | **数据类型** | **说明** |
-| --- | :---------: | ------------ | ------------------------------ |
-| 1 | topic_name | BINARY(192) | topic 名称 |
-| 2 | db_name | BINARY(64) | topic 相关的 DB |
-| 3 | create_time | TIMESTAMP | topic 的 创建时间 |
-| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 |
-
## PERF_CONSUMERS
| # | **列名** | **数据类型** | **说明** |
@@ -84,15 +75,6 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
| 7 | subscribe_time | TIMESTAMP | 上一次发起订阅的时间 |
| 8 | rebalance_time | TIMESTAMP | 上一次触发 rebalance 的时间 |
-## PERF_SUBSCRIPTIONS
-
-| # | **列名** | **数据类型** | **说明** |
-| --- | :------------: | ------------ | ------------------------ |
-| 1 | topic_name | BINARY(204) | 被订阅的 topic |
-| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 |
-| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
-| 4 | consumer_id | BIGINT | 消费者的唯一 id |
-
## PERF_TRANS
| # | **列名** | **数据类型** | **说明** |
@@ -114,17 +96,3 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
| 2 | create_time | TIMESTAMP | sma 创建时间 |
| 3 | stable_name | BINARY(192) | sma 所属的超级表名称 |
| 4 | vgroup_id | INT | sma 专属的 vgroup 名称 |
-
-## PERF_STREAMS
-
-| # | **列名** | **数据类型** | **说明** |
-| --- | :----------: | ------------ | --------------------------------------- |
-| 1 | stream_name | BINARY(64) | 流计算名称 |
-| 2 | create_time | TIMESTAMP | 创建时间 |
-| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 |
-| 4 | status | BIANRY(20) | 流当前状态 |
-| 5 | source_db | BINARY(64) | 源数据库 |
-| 6 | target_db | BIANRY(64) | 目的数据库 |
-| 7 | target_table | BINARY(192) | 流计算写入的目标表 |
-| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 |
-| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 |
diff --git a/docs/zh/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md
index f84ec65b4c..67d25e24aa 100644
--- a/docs/zh/14-reference/05-taosbenchmark.md
+++ b/docs/zh/14-reference/05-taosbenchmark.md
@@ -87,7 +87,7 @@ taosBenchmark -f subscribe.json
```json
-{{#include /taos-tools/example/subscribe.json}}
+{{#include /taos-tools/example/tmq.json}}
```
@@ -405,37 +405,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
订阅子表或者普通表的配置参数在 `specified_table_query` 中设置。
-- **threads** : 执行 SQL 的线程数,默认为 1。
-
-- **interval** : 执行订阅的时间间隔,单位为秒,默认为 0。
-
-- **restart** : "yes" 表示开始新的订阅,"no" 表示继续之前的订阅,默认值为 "no"。
-
-- **keepProgress** : "yes" 表示保留订阅进度,"no" 表示不保留,默认值为 "no"。
-
-- **resubAfterConsume** : "yes" 表示取消之前的订阅然后再次订阅, "no" 表示继续之前的订阅,默认值为 "no"。
+- **threads/concurrent** : 执行 SQL 的线程数,默认为 1。
- **sqls** :
- **sql** : 执行的 SQL 命令,必填。
- - **result** : 保存查询结果的文件,未指定则不保存。
-
-#### 订阅超级表的配置参数
-
-订阅超级表的配置参数在 `super_table_query` 中设置。
-
-- **stblname** : 要订阅的超级表名称,必填。
-
-- **threads** : 执行 SQL 的线程数,默认为 1。
-
-- **interval** : 执行订阅的时间间隔,单位为秒,默认为 0。
-
-- **restart** : "yes" 表示开始新的订阅,"no" 表示继续之前的订阅,默认值为 "no"。
-
-- **keepProgress** : "yes" 表示保留订阅进度,"no" 表示不保留,默认值为 "no"。
-
-- **resubAfterConsume** : "yes" 表示取消之前的订阅然后再次订阅, "no" 表示继续之前的订阅,默认值为 "no"。
-
-- **sqls** :
- - **sql** : 执行的 SQL 命令,必填;对于超级表的查询 SQL,在 SQL 命令中保留 "xxxx",程序会自动将其替换为超级表的所有子表名。
- 替换为超级表中所有的子表名。
- - **result** : 保存查询结果的文件,未指定则不保存。
diff --git a/docs/zh/14-reference/11-docker/index.md b/docs/zh/14-reference/11-docker/index.md
index d712e9aba8..58bbe1e117 100644
--- a/docs/zh/14-reference/11-docker/index.md
+++ b/docs/zh/14-reference/11-docker/index.md
@@ -119,7 +119,7 @@ taos -h tdengine -P 6030
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -234,7 +234,7 @@ go mod tidy
```dockerfile
FROM golang:1.19.0-buster as builder
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -250,7 +250,7 @@ RUN go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
diff --git a/docs/zh/14-reference/14-taosKeeper.md b/docs/zh/14-reference/14-taosKeeper.md
index f1165c9d0f..ae0a496f03 100644
--- a/docs/zh/14-reference/14-taosKeeper.md
+++ b/docs/zh/14-reference/14-taosKeeper.md
@@ -1,7 +1,7 @@
---
sidebar_label: taosKeeper
title: taosKeeper
-description: TDengine taosKeeper 使用说明
+description: TDengine 3.0 版本监控指标的导出工具
---
## 简介
@@ -22,26 +22,36 @@ taosKeeper 安装方式:
### 配置和运行方式
-
-taosKeeper 需要在操作系统终端执行,该工具支持 [配置文件启动](#配置文件启动)。
+taosKeeper 需要在操作系统终端执行,该工具支持三种配置方式:[命令行参数](#命令行参数启动)、[环境变量](#环境变量启动) 和 [配置文件](#配置文件启动)。优先级为:命令行参数、环境变量、配置文件参数。
**在运行 taosKeeper 之前要确保 TDengine 集群与 taosAdapter 已经在正确运行。** 并且 TDengine 已经开启监控服务,具体请参考:[TDengine 监控配置](../config/#监控相关)。
-
+
+### 环境变量启动
+
+通过设置环境变量达到控制启动参数的目的,通常在容器中运行时使用。
+
+```shell
+$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
+
+$ taoskeeper
+```
+
+具体参数列表请参照 `taoskeeper -h` 输入结果。
+
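The stated precedence (command-line parameters over environment variables over the configuration file) can be sketched generically; this illustrates the resolution order only and is not taosKeeper's actual code:

```c
#include <stdlib.h>

// Sketch of the stated precedence: CLI flag > environment variable > file.
// resolve_host() is a hypothetical helper for illustration.
const char *resolve_host(const char *cli_value, const char *file_value) {
  if (cli_value != NULL && cli_value[0] != '\0') return cli_value;
  const char *env_value = getenv("TAOS_KEEPER_TDENGINE_HOST");
  if (env_value != NULL && env_value[0] != '\0') return env_value;
  return file_value;  // finally fall back to keeper.toml or built-in defaults
}
```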
### 配置文件启动
执行以下命令即可快速体验 taosKeeper。当不指定 taosKeeper 配置文件时,优先使用 `/etc/taos/keeper.toml` 配置,否则将使用默认配置。
```shell
-taoskeeper -c
+$ taoskeeper -c
```
**下面是配置文件的示例:**
@@ -110,7 +120,7 @@ Query OK, 1 rows in database (0.036162s)
#### 导出监控指标
```shell
-curl http://127.0.0.1:6043/metrics
+$ curl http://127.0.0.1:6043/metrics
```
部分结果集:
diff --git a/docs/zh/20-third-party/12-google-data-studio.md b/docs/zh/20-third-party/12-google-data-studio.md
new file mode 100644
index 0000000000..bc06f0ea32
--- /dev/null
+++ b/docs/zh/20-third-party/12-google-data-studio.md
@@ -0,0 +1,39 @@
+---
+sidebar_label: Google Data Studio
+title: TDengine Google Data Studio Connector
+description: 使用 Google Data Studio 存取 TDengine 数据的详细指南
+---
+
+Google Data Studio 是一个强大的报表可视化工具,它提供了丰富的数据图表和数据连接,可以非常方便地按照既定模板生成报表。因其简便易用和生态丰富而在数据分析领域得到一众数据科学家的青睐。
+
+Data Studio 可以支持多种数据来源,除了诸如 Google Analytics、Google AdWords、Search Console、BigQuery 等 Google 自己的服务之外,用户也可以直接将离线文件上传至 Google Cloud Storage,或是通过连接器来接入其它数据源。
+
+
+
+目前 TDengine 连接器已经发布到 Google Data Studio 应用商店,你可以在 “Connect to Data” 页面下直接搜索 TDengine,将其选作数据源。
+
+
+
+接下来选择 AUTHORIZE 按钮。
+
+
+
+设置允许将自己的账号连接到外部服务。
+
+
+
+在接下来的页面选择运行 TDengine REST 服务的 URL,并输入用户名、密码、数据库名称、表名称以及查询时间范围,并点击右上角的 CONNECT 按钮。
+
+
+
+连接成功后,就可以使用 GDS 方便地进行数据处理并创建报表了。
+
+
+
+目前的维度和指标规则是:timestamp 类型的字段和 tag 字段会被连接器定义为维度,而其他类型的字段是指标。用户还可以根据自己的需求创建不同的表。
+
+
+
+
+
+
diff --git a/docs/zh/20-third-party/gds/gds-01.webp b/docs/zh/20-third-party/gds/gds-01.webp
new file mode 100644
index 0000000000..2e5f9e4ff5
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-01.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-02.png.webp b/docs/zh/20-third-party/gds/gds-02.png.webp
new file mode 100644
index 0000000000..3b3537f5a4
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-02.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-03.png.webp b/docs/zh/20-third-party/gds/gds-03.png.webp
new file mode 100644
index 0000000000..5719436d5b
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-03.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-04.png.webp b/docs/zh/20-third-party/gds/gds-04.png.webp
new file mode 100644
index 0000000000..ddaae5c1a6
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-04.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-05.png.webp b/docs/zh/20-third-party/gds/gds-05.png.webp
new file mode 100644
index 0000000000..9a917678fc
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-05.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-06.png.webp b/docs/zh/20-third-party/gds/gds-06.png.webp
new file mode 100644
index 0000000000..c76b68d32b
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-06.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-07.png.webp b/docs/zh/20-third-party/gds/gds-07.png.webp
new file mode 100644
index 0000000000..1386ae9c4d
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-07.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-08.png.webp b/docs/zh/20-third-party/gds/gds-08.png.webp
new file mode 100644
index 0000000000..59dcf8b31d
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-08.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-09.png.webp b/docs/zh/20-third-party/gds/gds-09.png.webp
new file mode 100644
index 0000000000..b94439f211
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-09.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-10.png.webp b/docs/zh/20-third-party/gds/gds-10.png.webp
new file mode 100644
index 0000000000..a63cad9e9a
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-10.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-11.png.webp b/docs/zh/20-third-party/gds/gds-11.png.webp
new file mode 100644
index 0000000000..fc38cd9a29
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-11.png.webp differ
diff --git a/include/client/taos.h b/include/client/taos.h
index f260b84f4a..49cfbb52b8 100644
--- a/include/client/taos.h
+++ b/include/client/taos.h
@@ -254,6 +254,7 @@ enum tmq_res_t {
TMQ_RES_INVALID = -1,
TMQ_RES_DATA = 1,
TMQ_RES_TABLE_META = 2,
+ TMQ_RES_TAOSX = 3,
};
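Consumers can branch on this new result type after a poll; a minimal sketch, assuming `tmq_get_res_type()` declared elsewhere in this header, with the per-case handling left as comments:

```c
#include <taos.h>

// Sketch: dispatch on the kind of message a TMQ poll returned.
void handle_poll_result(TAOS_RES *msg) {
  switch (tmq_get_res_type(msg)) {
    case TMQ_RES_DATA:       /* plain data block: fetch rows as usual */ break;
    case TMQ_RES_TABLE_META: /* meta event, e.g. table creation       */ break;
    case TMQ_RES_TAOSX:      /* new: mixed data plus meta, for taosx  */ break;
    default:                 /* TMQ_RES_INVALID: nothing consumable   */ break;
  }
}
```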
typedef struct tmq_raw_data {
diff --git a/include/common/systable.h b/include/common/systable.h
index 01c9807627..882c54de95 100644
--- a/include/common/systable.h
+++ b/include/common/systable.h
@@ -43,17 +43,17 @@ extern "C" {
#define TSDB_INS_TABLE_VNODES "ins_vnodes"
#define TSDB_INS_TABLE_CONFIGS "ins_configs"
#define TSDB_INS_TABLE_DNODE_VARIABLES "ins_dnode_variables"
+#define TSDB_INS_TABLE_SUBSCRIPTIONS "ins_subscriptions"
+#define TSDB_INS_TABLE_TOPICS "ins_topics"
+#define TSDB_INS_TABLE_STREAMS "ins_streams"
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
#define TSDB_PERFS_TABLE_SMAS "perf_smas"
#define TSDB_PERFS_TABLE_CONNECTIONS "perf_connections"
#define TSDB_PERFS_TABLE_QUERIES "perf_queries"
-#define TSDB_PERFS_TABLE_TOPICS "perf_topics"
#define TSDB_PERFS_TABLE_CONSUMERS "perf_consumers"
-#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "perf_subscriptions"
#define TSDB_PERFS_TABLE_OFFSETS "perf_offsets"
#define TSDB_PERFS_TABLE_TRANS "perf_trans"
-#define TSDB_PERFS_TABLE_STREAMS "perf_streams"
#define TSDB_PERFS_TABLE_APPS "perf_apps"
typedef struct SSysDbTableSchema {
diff --git a/include/common/tcommon.h b/include/common/tcommon.h
index 03672f96f3..891c9ab040 100644
--- a/include/common/tcommon.h
+++ b/include/common/tcommon.h
@@ -73,6 +73,7 @@ enum {
TMQ_MSG_TYPE__POLL_RSP,
TMQ_MSG_TYPE__POLL_META_RSP,
TMQ_MSG_TYPE__EP_RSP,
+ TMQ_MSG_TYPE__TAOSX_RSP,
TMQ_MSG_TYPE__END_RSP,
};
@@ -129,7 +130,6 @@ typedef struct SDataBlockInfo {
uint32_t capacity;
// TODO: optimize and remove following
int64_t version; // used for stream, and need serialization
- int64_t ts; // used for stream, and need serialization
int32_t childId; // used for stream, do not serialize
EStreamType type; // used for stream, do not serialize
STimeWindow calWin; // used for stream, do not serialize
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index 03e15ed8e7..f72cb3d6d9 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -144,8 +144,8 @@ void taosCfgDynamicOptions(const char *option, const char *value);
struct SConfig *taosGetCfg();
-void taosSetAllDebugFlag(int32_t flag);
-void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal);
+void taosSetAllDebugFlag(int32_t flag, bool rewrite);
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
int32_t taosSetCfg(SConfig *pCfg, char *name);
void taosLocalCfgForbiddenToChange(char* name, bool* forbidden);
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index a1a967f533..c8b90a9991 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -276,7 +276,6 @@ struct SSchema {
char name[TSDB_COL_NAME_LEN];
};
-
typedef struct {
char tbName[TSDB_TABLE_NAME_LEN];
char stbName[TSDB_TABLE_NAME_LEN];
@@ -295,17 +294,15 @@ typedef struct {
SSchema* pSchemas;
} STableMetaRsp;
-
-
typedef struct {
- int32_t code;
- int8_t hashMeta;
- int64_t uid;
- char* tblFName;
- int32_t numOfRows;
- int32_t affectedRows;
- int64_t sver;
- STableMetaRsp* pMeta;
+ int32_t code;
+ int8_t hashMeta;
+ int64_t uid;
+ char* tblFName;
+ int32_t numOfRows;
+ int32_t affectedRows;
+ int64_t sver;
+ STableMetaRsp* pMeta;
} SSubmitBlkRsp;
typedef struct {
@@ -320,7 +317,7 @@ typedef struct {
int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp);
int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp);
-void tFreeSSubmitBlkRsp(void* param);
+void tFreeSSubmitBlkRsp(void* param);
void tFreeSSubmitRsp(SSubmitRsp* pRsp);
#define COL_SMA_ON ((int8_t)0x1)
@@ -2049,8 +2046,8 @@ typedef struct {
STableMetaRsp* pMeta;
} SVCreateTbRsp, SVUpdateTbRsp;
-int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp);
-int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp);
+int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp);
+int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp);
void tFreeSVCreateTbRsp(void* param);
int32_t tSerializeSVCreateTbReq(void** buf, SVCreateTbReq* pReq);
@@ -2630,6 +2627,22 @@ typedef struct {
};
} STqOffsetVal;
+static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) {
+ pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA;
+ pOffsetVal->uid = uid;
+ pOffsetVal->ts = ts;
+}
+
+static FORCE_INLINE void tqOffsetResetToMeta(STqOffsetVal* pOffsetVal, int64_t uid) {
+ pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_META;
+ pOffsetVal->uid = uid;
+}
+
+static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ver) {
+ pOffsetVal->type = TMQ_OFFSET__LOG;
+ pOffsetVal->version = ver;
+}
+
int32_t tEncodeSTqOffsetVal(SEncoder* pEncoder, const STqOffsetVal* pOffsetVal);
int32_t tDecodeSTqOffsetVal(SDecoder* pDecoder, STqOffsetVal* pOffsetVal);
int32_t tFormatOffset(char* buf, int32_t maxLen, const STqOffsetVal* pVal);
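A hedged usage sketch of the new reset helpers, paired with `tFormatOffset` for logging; repo-internal headers and types are assumed:

```c
// Sketch (repo-internal, assumes tmsg.h): choose a position, then log it.
void example_offset(void) {
  STqOffsetVal offset = {0};
  tqOffsetResetToLog(&offset, 100);          // resume from WAL version 100
  char buf[64] = {0};
  tFormatOffset(buf, sizeof(buf), &offset);  // readable form for debug logs
}
```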
@@ -2961,6 +2974,26 @@ typedef struct {
int32_t tEncodeSMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp);
int32_t tDecodeSMqDataRsp(SDecoder* pDecoder, SMqDataRsp* pRsp);
+void tDeleteSMqDataRsp(SMqDataRsp* pRsp);
+
+typedef struct {
+ SMqRspHead head;
+ STqOffsetVal reqOffset;
+ STqOffsetVal rspOffset;
+ int32_t blockNum;
+ int8_t withTbName;
+ int8_t withSchema;
+ SArray* blockDataLen;
+ SArray* blockData;
+ SArray* blockTbName;
+ SArray* blockSchema;
+ int32_t createTableNum;
+ SArray* createTableLen;
+ SArray* createTableReq;
+} STaosxRsp;
+
+int32_t tEncodeSTaosxRsp(SEncoder* pEncoder, const STaosxRsp* pRsp);
+int32_t tDecodeSTaosxRsp(SDecoder* pDecoder, STaosxRsp* pRsp);
typedef struct {
SMqRspHead head;
diff --git a/include/common/ttypes.h b/include/common/ttypes.h
index ceb3eae033..a88f65f6ac 100644
--- a/include/common/ttypes.h
+++ b/include/common/ttypes.h
@@ -49,9 +49,6 @@ typedef struct {
#define varDataCopy(dst, v) memcpy((dst), (void *)(v), varDataTLen(v))
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
-#define IS_VAR_DATA_TYPE(t) \
- (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
-#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
#define varDataNetLen(v) (htons(((VarDataLenT *)(v))[0]))
#define varDataNetTLen(v) (sizeof(VarDataLenT) + varDataNetLen(v))
@@ -268,11 +265,16 @@ typedef struct {
#define IS_UNSIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_UTINYINT && (_t) <= TSDB_DATA_TYPE_UBIGINT)
#define IS_FLOAT_TYPE(_t) ((_t) == TSDB_DATA_TYPE_FLOAT || (_t) == TSDB_DATA_TYPE_DOUBLE)
#define IS_INTEGER_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)))
+#define IS_TIMESTAMP_TYPE(_t) ((_t) == TSDB_DATA_TYPE_TIMESTAMP)
#define IS_NUMERIC_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)) || (IS_FLOAT_TYPE(_t)))
#define IS_MATHABLE_TYPE(_t) \
(IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))
+#define IS_VAR_DATA_TYPE(t) \
+ (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
+#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
+
#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
#define IS_VALID_SMALLINT(_t) ((_t) >= INT16_MIN && (_t) <= INT16_MAX)
#define IS_VALID_INT(_t) ((_t) >= INT32_MIN && (_t) <= INT32_MAX)
diff --git a/include/libs/function/function.h b/include/libs/function/function.h
index c8db01625e..3f26eee86a 100644
--- a/include/libs/function/function.h
+++ b/include/libs/function/function.h
@@ -92,6 +92,8 @@ struct SResultRowEntryInfo;
//for selectivity query, the corresponding tag value is assigned if the data is qualified
typedef struct SSubsidiaryResInfo {
int16_t num;
+ int32_t rowLen;
+ char* buf; // serialize data buffer
struct SqlFunctionCtx **pCtx;
} SSubsidiaryResInfo;
@@ -118,6 +120,11 @@ typedef struct SInputColumnInfoData {
uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions.
} SInputColumnInfoData;
+typedef struct SSerializeDataHandle {
+ struct SDiskbasedBuf* pBuf;
+ int32_t currentPage;
+} SSerializeDataHandle;
+
// sql function runtime context
typedef struct SqlFunctionCtx {
SInputColumnInfoData input;
@@ -137,10 +144,9 @@ typedef struct SqlFunctionCtx {
SFuncExecFuncs fpSet;
SScalarFuncExecFuncs sfp;
struct SExprInfo *pExpr;
- struct SDiskbasedBuf *pBuf;
struct SSDataBlock *pSrcBlock;
struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity
- int32_t curBufPage;
+ SSerializeDataHandle saveHandle;
bool isStream;
char udfName[TSDB_FUNC_NAME_LEN];
diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h
index 741b0fddeb..c9c19579cb 100644
--- a/include/libs/function/functionMgt.h
+++ b/include/libs/function/functionMgt.h
@@ -176,7 +176,8 @@ int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen);
EFuncReturnRows fmGetFuncReturnRows(SFunctionNode* pFunc);
-bool fmIsBuiltinFunc(const char* pFunc);
+bool fmIsBuiltinFunc(const char* pFunc);
+EFunctionType fmGetFuncType(const char* pFunc);
bool fmIsAggFunc(int32_t funcId);
bool fmIsScalarFunc(int32_t funcId);
diff --git a/include/util/tencode.h b/include/util/tencode.h
index ad642cd612..a6dd58297e 100644
--- a/include/util/tencode.h
+++ b/include/util/tencode.h
@@ -264,12 +264,14 @@ static FORCE_INLINE int32_t tEncodeDouble(SEncoder* pCoder, double val) {
static FORCE_INLINE int32_t tEncodeBinary(SEncoder* pCoder, const uint8_t* val, uint32_t len) {
if (tEncodeU32v(pCoder, len) < 0) return -1;
- if (pCoder->data) {
- if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1;
- memcpy(TD_CODER_CURRENT(pCoder), val, len);
- }
+ if (len) {
+ if (pCoder->data) {
+ if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1;
+ memcpy(TD_CODER_CURRENT(pCoder), val, len);
+ }
- TD_CODER_MOVE_POS(pCoder, len);
+ TD_CODER_MOVE_POS(pCoder, len);
+ }
return 0;
}
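With this guard, a zero-length binary encodes as just its varint length and, per the matching decode change below, comes back as a NULL pointer without an allocation. A round-trip sketch, assuming the `tEncoderInit`/`tDecoderInit` helpers from this header:

```c
// Sketch (assumes tEncoderInit/tDecoderInit from this header).
void zero_len_roundtrip(void) {
  uint8_t  buf[16];
  SEncoder enc = {0};
  tEncoderInit(&enc, buf, sizeof(buf));
  tEncodeBinary(&enc, NULL, 0);          // writes only the varint length 0

  SDecoder dec = {0};
  tDecoderInit(&dec, buf, sizeof(buf));
  void    *val = NULL;
  uint64_t len = 0;
  tDecodeBinaryAlloc(&dec, &val, &len);  // val stays NULL, nothing allocated
}
```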
@@ -414,14 +416,18 @@ static int32_t tDecodeCStrTo(SDecoder* pCoder, char* val) {
static FORCE_INLINE int32_t tDecodeBinaryAlloc(SDecoder* pCoder, void** val, uint64_t* len) {
uint64_t length = 0;
if (tDecodeU64v(pCoder, &length) < 0) return -1;
- if (len) *len = length;
+ if (length) {
+ if (len) *len = length;
- if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
- *val = taosMemoryMalloc(length);
- if (*val == NULL) return -1;
- memcpy(*val, TD_CODER_CURRENT(pCoder), length);
+ if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
+ *val = taosMemoryMalloc(length);
+ if (*val == NULL) return -1;
+ memcpy(*val, TD_CODER_CURRENT(pCoder), length);
- TD_CODER_MOVE_POS(pCoder, length);
+ TD_CODER_MOVE_POS(pCoder, length);
+ } else {
+ *val = NULL;
+ }
return 0;
}
diff --git a/include/util/tpagedbuf.h b/include/util/tpagedbuf.h
index 57a489c0dd..9ab89273e6 100644
--- a/include/util/tpagedbuf.h
+++ b/include/util/tpagedbuf.h
@@ -58,11 +58,10 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
/**
*
* @param pBuf
- * @param groupId
* @param pageId
* @return
*/
-void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId);
+void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId);
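Call sites drop the group argument accordingly; a sketch of an updated caller:

```c
// Sketch of an updated call site; pBuf is an existing SDiskbasedBuf*.
void *alloc_page(SDiskbasedBuf *pBuf) {
  int32_t pageId = -1;                  // receives the id of the new page
  return getNewBufPage(pBuf, &pageId);  // the groupId argument is gone
}
```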
/**
*
diff --git a/packaging/MPtestJenkinsfile b/packaging/MPtestJenkinsfile
index 5e259d56be..77f642180a 100644
--- a/packaging/MPtestJenkinsfile
+++ b/packaging/MPtestJenkinsfile
@@ -64,6 +64,11 @@ pipeline {
defaultValue:'2.1.2',
description: 'This number of baseVerison is generally not modified.Now it is 3.0.0.1'
)
+ string (
+ name:'nasPassword',
+ defaultValue:'password',
+ description: 'the password of the NAS server which has installPackage-192.168.1.131'
+ )
}
environment{
WORK_DIR = '/var/lib/jenkins/workspace'
@@ -107,21 +112,21 @@ pipeline {
stage('ubuntu16') {
agent{label " ubuntu16 "}
steps {
- timeout(time: 10, unit: 'MINUTES'){
+ timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
- bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
- bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
- bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
}
@@ -130,21 +135,26 @@ pipeline {
stage('ubuntu18') {
agent{label " ubuntu18 "}
steps {
- timeout(time: 10, unit: 'MINUTES'){
+ timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
- bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
- bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
- bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_DEB} ${baseVersion} server
+ bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_CLIENT_TAR} ${version} ${BASE_TD_CLIENT_TAR} ${baseVersion} client ${nasPassword}
python3 checkPackageRuning.py
'''
}
@@ -153,21 +163,21 @@ pipeline {
stage('centos7') {
agent{label " centos7_9 "}
steps {
- timeout(time: 10, unit: 'MINUTES'){
+ timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
- bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
- bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
- bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
}
@@ -176,22 +186,45 @@ pipeline {
stage('centos8') {
agent{label " centos8_3 "}
steps {
- timeout(time: 10, unit: 'MINUTES'){
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_CLIENT_LITE_TAR} ${version} ${BASE_TD_CLIENT_LITE_TAR} ${baseVersion} client ${nasPassword}
+ python3 checkPackageRuning.py
+ '''
+ }
+ }
+ }
+
+ stage('arm64') {
+ agent{label 'linux_arm64'}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
- bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
- python3 checkPackageRuning.py
- rmtaos
- '''
- sh '''
- cd ${TDENGINE_ROOT_DIR}/packaging
- bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
+ bash testpackage.sh ${TD_SERVER_ARM_TAR} ${version} ${BASE_TD_SERVER_ARM_TAR} ${baseVersion} server ${nasPassword}
python3 checkPackageRuning.py
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
- bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ bash testpackage.sh ${TD_CLIENT_ARM_TAR} ${version} ${BASE_TD_CLIENT_ARM_TAR} ${baseVersion} client ${nasPassword}
python3 checkPackageRuning.py
'''
}
diff --git a/packaging/debRpmAutoInstall.sh b/packaging/debRpmAutoInstall.sh
new file mode 100755
index 0000000000..1f51378c91
--- /dev/null
+++ b/packaging/debRpmAutoInstall.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/expect
+set packgeName [lindex $argv 0]
+set packageSuffix [lindex $argv 1]
+set timeout 3
+if { ${packageSuffix} == "deb" } {
+ spawn dpkg -i ${packgeName}
+} elseif { ${packageSuffix} == "rpm"} {
+ spawn rpm -ivh ${packgeName}
+}
+expect "*one:"
+send "\r"
+expect "*skip:"
+send "\r"
diff --git a/packaging/docker/README.md b/packaging/docker/README.md
index cb27d3bca6..763ab73724 100644
--- a/packaging/docker/README.md
+++ b/packaging/docker/README.md
@@ -158,7 +158,7 @@ When you build your application with docker, you should add the TDengine client
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -265,7 +265,7 @@ Full version of dockerfile could be:
```dockerfile
FROM golang:1.17.6-buster as builder
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -279,7 +279,7 @@ RUN go env && go mod tidy && go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh
index 669b0c9e1e..56da9e59be 100755
--- a/packaging/testpackage.sh
+++ b/packaging/testpackage.sh
@@ -1,12 +1,24 @@
#!/bin/sh
-
-
+#parameter
+scriptDir=$(dirname $(readlink -f $0))
packgeName=$1
version=$2
originPackageName=$3
originversion=$4
testFile=$5
subFile="taos.tar.gz"
+password=$6
+
+# Color setting
+RED='\033[41;30m'
+GREEN='\033[1;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[1;34m'
+GREEN_DARK='\033[0;32m'
+YELLOW_DARK='\033[0;33m'
+BLUE_DARK='\033[0;34m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
if [ ${testFile} = "server" ];then
tdPath="TDengine-server-${version}"
@@ -23,109 +35,211 @@ elif [ ${testFile} = "tools" ];then
fi
function cmdInstall {
-comd=$1
-if command -v ${comd} ;then
- echo "${comd} is already installed"
+command=$1
+if command -v ${command} ;then
+ echoColor YD "${command} is already installed"
else
if command -v apt ;then
- apt-get install ${comd} -y
+ apt-get install ${command} -y
elif command -v yum ;then
- yum -y install ${comd}
- echo "you should install ${comd} manually"
+ yum -y install ${command}
+ echoColor YD "you should install ${command} manually"
fi
fi
}
+function echoColor {
+color=$1
+command=$2
-echo "Uninstall all components of TDeingne"
-
-if command -v rmtaos ;then
- echo "uninstall all components of TDeingne:rmtaos"
- rmtaos
-else
- echo "os doesn't include TDengine "
+if [ ${color} = 'Y' ];then
+ echo -e "${YELLOW}${command}${NC}"
+elif [ ${color} = 'YD' ];then
+ echo -e "${YELLOW_DARK}${command}${NC}"
+elif [ ${color} = 'R' ];then
+ echo -e "${RED}${command}${NC}"
+elif [ ${color} = 'G' ];then
+ echo -e "${GREEN}${command}${NC}\r\n"
+elif [ ${color} = 'B' ];then
+ echo -e "${BLUE}${command}${NC}"
+elif [ ${color} = 'BD' ];then
+ echo -e "${BLUE_DARK}${command}${NC}"
fi
+}
-if command -v rmtaostools ;then
- echo "uninstall all components of TDeingne:rmtaostools"
- rmtaostools
-else
- echo "os doesn't include rmtaostools "
-fi
+echoColor G "===== install basesoft ====="
cmdInstall tree
cmdInstall wget
+cmdInstall sshpass
-echo "new workroom path"
+echoColor G "===== Uninstall all components of TDeingne ====="
+
+if command -v rmtaos ;then
+ echoColor YD "uninstall all components of TDeingne:rmtaos"
+ rmtaos
+else
+ echoColor YD "os doesn't include TDengine"
+fi
+
+if command -v rmtaostools ;then
+ echoColor YD "uninstall all components of TDeingne:rmtaostools"
+ rmtaostools
+else
+ echoColor YD "os doesn't include rmtaostools "
+fi
+
+
+
+
+echoColor G "===== new workroom path ====="
installPath="/usr/local/src/packageTest"
oriInstallPath="/usr/local/src/packageTest/3.1"
if [ ! -d ${installPath} ] ;then
+ echoColor BD "mkdir -p ${installPath}"
mkdir -p ${installPath}
else
- echo "${installPath} already exists"
+ echoColor YD "${installPath} already exists"
fi
+if [ -d ${installPath}/${tdPath} ] ;then
+ echoColor BD "rm -rf ${installPath}/${tdPath}/*"
+ rm -rf ${installPath}/${tdPath}/*
+fi
if [ ! -d ${oriInstallPath} ] ;then
+ echoColor BD "mkdir -p ${oriInstallPath}"
mkdir -p ${oriInstallPath}
else
- echo "${oriInstallPath} already exists"
+ echoColor YD "${oriInstallPath} already exists"
fi
-echo "decompress installPackage"
+if [ -d ${oriInstallPath}/${originTdpPath} ] ;then
+ echoColor BD "rm -rf ${oriInstallPath}/${originTdpPath}/*"
+ rm -rf ${oriInstallPath}/${originTdpPath}/*
+fi
+
+
+echoColor G "===== download installPackage ====="
+# cd ${installPath}
+# wget https://www.taosdata.com/assets-download/3.0/${packgeName}
+# cd ${oriInstallPath}
+# wget https://www.taosdata.com/assets-download/3.0/${originPackageName}
cd ${installPath}
-wget https://www.taosdata.com/assets-download/3.0/${packgeName}
-cd ${oriInstallPath}
-wget https://www.taosdata.com/assets-download/3.0/${originPackageName}
+cp -r ${scriptDir}/debRpmAutoInstall.sh .
+if [ ! -f ${packgeName} ];then
+ echoColor BD "sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} ."
+ sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} .
+fi
+
+packageSuffix=$(echo ${packgeName} | awk -F '.' '{print $NF}')
+
+
+if [ ! -f debRpmAutoInstall.sh ];then
+ echo '#!/usr/bin/expect ' > debRpmAutoInstall.sh
+ echo 'set packgeName [lindex $argv 0]' >> debRpmAutoInstall.sh
+ echo 'set packageSuffix [lindex $argv 1]' >> debRpmAutoInstall.sh
+ echo 'set timeout 3 ' >> debRpmAutoInstall.sh
+ echo 'if { ${packageSuffix} == "deb" } {' >> debRpmAutoInstall.sh
+ echo ' spawn dpkg -i ${packgeName} ' >> debRpmAutoInstall.sh
+ echo '} elseif { ${packageSuffix} == "rpm"} {' >> debRpmAutoInstall.sh
+ echo ' spawn rpm -ivh ${packgeName}' >> debRpmAutoInstall.sh
+ echo '}' >> debRpmAutoInstall.sh
+ echo 'expect "*one:"' >> debRpmAutoInstall.sh
+ echo 'send "\r"' >> debRpmAutoInstall.sh
+ echo 'expect "*skip:"' >> debRpmAutoInstall.sh
+ echo 'send "\r" ' >> debRpmAutoInstall.sh
+fi
+
+
+echoColor G "===== instal Package ====="
if [[ ${packgeName} =~ "deb" ]];then
- echo "dpkg ${packgeName}" && dpkg -i ${packgeName}
+ cd ${installPath}
+ dpkg -r taostools
+ dpkg -r tdengine
+ if [[ ${packgeName} =~ "TDengine" ]];then
+ echoColor BD "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}
+ else
+ echoColor BD "dpkg -i ${packgeName}" && dpkg -i ${packgeName}
+ fi
elif [[ ${packgeName} =~ "rpm" ]];then
- echo "rpm ${packgeName}" && rpm -ivh ${packgeName}
+ cd ${installPath}
+ sudo rpm -e tdengine
+ sudo rpm -e taostools
+ if [[ ${packgeName} =~ "TDengine" ]];then
+ echoColor BD "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}
+ else
+ echoColor BD "rpm -ivh ${packgeName}" && rpm -ivh ${packgeName}
+ fi
elif [[ ${packgeName} =~ "tar" ]];then
- echo "tar ${packgeName}" && tar -xvf ${packgeName}
- cd ${oriInstallPath}
- echo "tar -xvf ${originPackageName}" && tar -xvf ${originPackageName}
+ echoColor G "===== check installPackage File of tar ====="
+ cd ${oriInstallPath}
+ if [ ! -f ${originPackageName} ];then
+ echoColor YD "download base installPackage"
+ echoColor BD "sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} ."
+ sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} .
+ fi
+ echoColor YD "unzip the base installation package"
+ echoColor BD "tar -xf ${originPackageName}" && tar -xf ${originPackageName}
cd ${installPath}
- echo "tar -xvf ${packgeName}" && tar -xvf ${packgeName}
-
+ echoColor YD "unzip the new installation package"
+ echoColor BD "tar -xf ${packgeName}" && tar -xf ${packgeName}
if [ ${testFile} != "tools" ] ;then
- cd ${installPath}/${tdPath} && tar vxf ${subFile}
- cd ${oriInstallPath}/${originTdpPath} && tar vxf ${subFile}
+ cd ${installPath}/${tdPath} && tar xf ${subFile}
+ cd ${oriInstallPath}/${originTdpPath} && tar xf ${subFile}
fi
- echo "check installPackage File"
-
+ cd ${oriInstallPath}/${originTdpPath} && tree > ${installPath}/base_${originversion}_checkfile
+ cd ${installPath}/${tdPath} && tree > ${installPath}/now_${version}_checkfile
+
cd ${installPath}
-
- tree ${oriInstallPath}/${originTdpPath} > ${originPackageName}_checkfile
- tree ${installPath}/${tdPath} > ${packgeName}_checkfile
-
- diff ${packgeName}_checkfile ${originPackageName}_checkfile > ${installPath}/diffFile.log
+ diff ${installPath}/base_${originversion}_checkfile ${installPath}/now_${version}_checkfile > ${installPath}/diffFile.log
diffNumbers=`cat ${installPath}/diffFile.log |wc -l `
- if [ ${diffNumbers} != 0 ];then
- echo "The number and names of files have changed from the previous installation package"
- echo `cat ${installPath}/diffFile.log`
- exit -1
- fi
+ if [ ${diffNumbers} != 0 ];then
+ echoColor R "The number and names of files is different from the previous installation package"
+ echoColor Y `cat ${installPath}/diffFile.log`
+ exit -1
+ else
+ echoColor G "The number and names of files are the same as previous installation packages"
+ fi
+ echoColor YD "===== install Package of tar ====="
cd ${installPath}/${tdPath}
if [ ${testFile} = "server" ];then
+ echoColor BD "bash ${installCmd} -e no "
bash ${installCmd} -e no
else
+ echoColor BD "bash ${installCmd} "
bash ${installCmd}
fi
- if [[ ${packgeName} =~ "Lite" ]];then
- cd ${installPath}
- wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz
- tar xvf taosTools-2.1.2-Linux-x64.tar.gz
- cd taosTools-2.1.2 && bash install-taostools.sh
- fi
-
fi
+cd ${installPath}
+
+if ([[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]]) || [[ ${packgeName} =~ "client" ]] ;then
+ echoColor G "===== install taos-tools when package is lite or client ====="
+ cd ${installPath}
+ sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz .
+ # wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz
+ tar xf taosTools-2.1.2-Linux-x64.tar.gz
+ cd taosTools-2.1.2 && bash install-taostools.sh
+elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then
+ echoColor G "===== install taos-tools when package is lite or client ====="
+ cd ${installPath}
+ sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz .
+ tar xf taosTools-2.1.2-Linux-x64.tar.gz
+ cd taosTools-2.1.2 && bash install-taostools.sh
+elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then
+ echoColor G "===== install taos-tools when package is lite or client ====="
+ cd ${installPath}
+ sshpass -p ${password} scp -oStrictHostKeyChecking=no 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz .
+ tar xf taosTools-2.1.2-Linux-x64.tar.gz
+ cd taosTools-2.1.2 && bash install-taostools.sh
+fi
+
diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h
index 07e5f75e87..b8fa9580e7 100644
--- a/source/client/inc/clientInt.h
+++ b/source/client/inc/clientInt.h
@@ -52,15 +52,17 @@ enum {
RES_TYPE__QUERY = 1,
RES_TYPE__TMQ,
RES_TYPE__TMQ_META,
+ RES_TYPE__TAOSX,
};
#define SHOW_VARIABLES_RESULT_COLS 2
#define SHOW_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
-#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
-#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ)
-#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
+#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
+#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ || *(int8_t*)res == RES_TYPE__TAOSX)
+#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
+#define TD_RES_TMQ_TAOSX(res) (*(int8_t*)res == RES_TYPE__TAOSX)
typedef struct SAppInstInfo SAppInstInfo;
@@ -101,6 +103,8 @@ typedef struct SQueryExecMetric {
int64_t ctgStart; // start to parse, us
int64_t ctgEnd; // end to parse, us
int64_t semanticEnd;
+ int64_t planEnd;
+ int64_t resultReady;
int64_t execEnd;
int64_t send; // start to send to server, us
int64_t rsp; // receive response from server, us
@@ -198,8 +202,8 @@ typedef struct {
int32_t vgId;
SSchemaWrapper schema;
int32_t resIter;
- SMqDataRsp rsp;
SReqResultInfo resInfo;
+ SMqDataRsp rsp;
} SMqRspObj;
typedef struct {
@@ -210,6 +214,17 @@ typedef struct {
SMqMetaRsp metaRsp;
} SMqMetaRspObj;
+typedef struct {
+ int8_t resType;
+ char topic[TSDB_TOPIC_FNAME_LEN];
+ char db[TSDB_DB_FNAME_LEN];
+ int32_t vgId;
+ SSchemaWrapper schema;
+ int32_t resIter;
+ SReqResultInfo resInfo;
+ STaosxRsp rsp;
+} SMqTaosxRspObj;
+
typedef struct SRequestObj {
int8_t resType; // query or tmq
uint64_t requestId;
@@ -369,7 +384,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData*
int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest);
int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList);
void doAsyncQuery(SRequestObj* pRequest, bool forceUpdateMeta);
-int32_t removeMeta(STscObj* pTscObj, SArray* tbList);
+int32_t removeMeta(STscObj* pTscObj, SArray* tbList);
int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog);
int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog);
bool qnodeRequired(SRequestObj* pRequest);
diff --git a/source/client/src/TMQConnector.c b/source/client/src/TMQConnector.c
index 17d3a212c4..fcf6957df9 100644
--- a/source/client/src/TMQConnector.c
+++ b/source/client/src/TMQConnector.c
@@ -42,6 +42,7 @@ void commit_cb(tmq_t *tmq, int32_t code, void *param) {
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqConfNewImp(JNIEnv *env, jobject jobj) {
tmq_conf_t *conf = tmq_conf_new();
+ jniGetGlobalMethod(env);
return (jlong)conf;
}
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index 1342e89b52..bf92a9ba6a 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -76,18 +76,19 @@ static void deregisterRequest(SRequestObj *pRequest) {
pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst);
if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) {
- tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 "us, exec:%" PRId64 "us",
- duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
- pRequest->metric.ctgEnd - pRequest->metric.ctgStart,
- pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
+ tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
+ "us, exec:%" PRId64 "us",
+ duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
+ pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
pRequest->metric.execEnd - pRequest->metric.semanticEnd);
atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
- tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 "us, exec:%" PRId64 "us",
- duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
- pRequest->metric.ctgEnd - pRequest->metric.ctgStart,
- pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
- pRequest->metric.execEnd - pRequest->metric.semanticEnd);
+ tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
+ "us, planner:%" PRId64 "us, exec:%" PRId64 "us",
+ duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
+ pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
+ pRequest->metric.planEnd - pRequest->metric.semanticEnd,
+ pRequest->metric.resultReady - pRequest->metric.planEnd);
atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
}
diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c
index 9475d1b51e..56e3527f96 100644
--- a/source/client/src/clientHb.c
+++ b/source/client/src/clientHb.c
@@ -145,7 +145,7 @@ static int32_t hbProcessStbInfoRsp(void *value, int32_t valueLen, struct SCatalo
}
static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
- SClientHbReq *pReq = taosHashGet(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey));
+ SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey));
if (NULL == pReq) {
tscWarn("pReq to get activeInfo, may be dropped, refId:%" PRIx64 ", type:%d", pRsp->connKey.tscRid,
pRsp->connKey.connType);
@@ -260,6 +260,8 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
}
}
+ taosHashRelease(pAppHbMgr->activeInfo, pReq);
+
return TSDB_CODE_SUCCESS;
}
@@ -914,10 +916,11 @@ int hbRegisterConn(SAppHbMgr *pAppHbMgr, int64_t tscRefId, int64_t clusterId, in
}
void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey) {
- SClientHbReq *pReq = taosHashGet(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
+ SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
if (pReq) {
tFreeClientHbReq(pReq);
taosHashRemove(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
+ taosHashRelease(pAppHbMgr->activeInfo, pReq);
}
if (NULL == pReq) {
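
Note on the taosHashGet -> taosHashAcquire change: taosHashGet returns a bare pointer into the table, so a concurrent remove or resize can invalidate it mid-use; taosHashAcquire pins the entry until the matching taosHashRelease. A sketch of the pattern both call sites above now follow, assuming the tdutil hash API:

    /* assumes "thash.h"; keyLen is the exact key size used at insert time */
    void useEntry(SHashObj *pTable, const void *key, size_t keyLen) {
      void *p = taosHashAcquire(pTable, key, keyLen); /* ref-counted lookup */
      if (p == NULL) return;
      /* ... safe to use p here even if another thread drops the key ... */
      taosHashRemove(pTable, key, keyLen); /* unlink first, as in hbDeregisterConn */
      taosHashRelease(pTable, p);          /* drop the reference last */
    }
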
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index 2baca1288f..f91ceb3184 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -728,7 +728,7 @@ int32_t handleSubmitExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog
tFreeSTableMetaRsp(blk->pMeta);
taosMemoryFreeClear(blk->pMeta);
}
-
+
if (NULL == blk->tblFName || 0 == blk->tblFName[0]) {
continue;
}
@@ -851,6 +851,8 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
SRequestObj* pRequest = (SRequestObj*)param;
pRequest->code = code;
+ pRequest->metric.resultReady = taosGetTimestampUs();
+
if (pResult) {
memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult));
}
@@ -1030,6 +1032,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
pRequest->body.subplanNum = pDag->numOfSubplans;
}
+ pRequest->metric.planEnd = taosGetTimestampUs();
+
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {
SArray* pNodeList = NULL;
buildAsyncExecNodeList(pRequest, &pNodeList, pMnodeList, pResultMeta);
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 9ceb6e0683..3086078080 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -184,6 +184,19 @@ void taos_free_result(TAOS_RES *res) {
SRequestObj *pRequest = (SRequestObj *)res;
tscDebug("0x%" PRIx64 " taos_free_result start to free query", pRequest->requestId);
destroyRequest(pRequest);
+ } else if (TD_RES_TMQ_TAOSX(res)) {
+ SMqTaosxRspObj *pRsp = (SMqTaosxRspObj *)res;
+ if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
+ if (pRsp->rsp.blockDataLen) taosArrayDestroy(pRsp->rsp.blockDataLen);
+ if (pRsp->rsp.withTbName) taosArrayDestroyP(pRsp->rsp.blockTbName, taosMemoryFree);
+ if (pRsp->rsp.withSchema) taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
+ // taosx
+ taosArrayDestroy(pRsp->rsp.createTableLen);
+ taosArrayDestroyP(pRsp->rsp.createTableReq, taosMemoryFree);
+
+ pRsp->resInfo.pRspMsg = NULL;
+ doFreeReqResultInfo(&pRsp->resInfo);
+ taosMemoryFree(pRsp);
} else if (TD_RES_TMQ(res)) {
SMqRspObj *pRsp = (SMqRspObj *)res;
if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c
index fa657fcb10..f08f54ef4b 100644
--- a/source/client/src/tmq.c
+++ b/source/client/src/tmq.c
@@ -164,6 +164,7 @@ typedef struct {
union {
SMqDataRsp dataRsp;
SMqMetaRsp metaRsp;
+ STaosxRsp taosxRsp;
};
} SMqPollRspWrapper;
@@ -810,8 +811,19 @@ int32_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) {
}
int32_t tmq_unsubscribe(tmq_t* tmq) {
+ int32_t rsp;
+ int32_t retryCnt = 0;
tmq_list_t* lst = tmq_list_new();
- int32_t rsp = tmq_subscribe(tmq, lst);
+ while (1) {
+ rsp = tmq_subscribe(tmq, lst);
+ if (rsp != TSDB_CODE_MND_CONSUMER_NOT_READY || retryCnt > 5) {
+ break;
+ } else {
+ retryCnt++;
+ taosMsleep(500);
+ }
+ }
+
tmq_list_destroy(lst);
return rsp;
}
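
With the NOT_READY retry folded into tmq_unsubscribe itself (up to ~5 attempts, 500 ms apart), a caller only needs to check the final code. A hedged usage sketch, assuming the standard C connector entry points (tmq_err2str, tmq_consumer_close):

    #include <stdio.h>
    #include "taos.h"

    void shutdownConsumer(tmq_t *tmq) {
      int32_t code = tmq_unsubscribe(tmq); /* retries internally on NOT_READY */
      if (code != 0) {
        fprintf(stderr, "unsubscribe failed after retries: %s\n", tmq_err2str(code));
      }
      tmq_consumer_close(tmq);
    }
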
@@ -1130,21 +1142,29 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tDecodeSMqDataRsp(&decoder, &pRspWrapper->dataRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead));
- } else {
- ASSERT(rspType == TMQ_MSG_TYPE__POLL_META_RSP);
+
+ tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
+ tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
+ rspType);
+
+ } else if (rspType == TMQ_MSG_TYPE__POLL_META_RSP) {
SDecoder decoder;
tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
tDecodeSMqMetaRsp(&decoder, &pRspWrapper->metaRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->metaRsp, pMsg->pData, sizeof(SMqRspHead));
+ } else if (rspType == TMQ_MSG_TYPE__TAOSX_RSP) {
+ SDecoder decoder;
+ tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
+ tDecodeSTaosxRsp(&decoder, &pRspWrapper->taosxRsp);
+ tDecoderClear(&decoder);
+ memcpy(&pRspWrapper->taosxRsp, pMsg->pData, sizeof(SMqRspHead));
+ } else {
+ ASSERT(0);
}
taosMemoryFree(pMsg->pData);
- tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
- tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
- rspType);
-
taosWriteQitem(tmq->mqueue, pRspWrapper);
tsem_post(&tmq->rspSem);
@@ -1443,6 +1463,24 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper) {
return pRspObj;
}
+SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) {
+ SMqTaosxRspObj* pRspObj = taosMemoryCalloc(1, sizeof(SMqTaosxRspObj));
+ pRspObj->resType = RES_TYPE__TAOSX;
+ tstrncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN);
+ tstrncpy(pRspObj->db, pWrapper->topicHandle->db, TSDB_DB_FNAME_LEN);
+ pRspObj->vgId = pWrapper->vgHandle->vgId;
+ pRspObj->resIter = -1;
+  memcpy(&pRspObj->rsp, &pWrapper->taosxRsp, sizeof(STaosxRsp));
+
+ pRspObj->resInfo.totalRows = 0;
+ pRspObj->resInfo.precision = TSDB_TIME_PRECISION_MILLI;
+  if (!pWrapper->taosxRsp.withSchema) {
+ setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols);
+ }
+
+ return pRspObj;
+}
+
int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
/*tscDebug("call poll");*/
for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
@@ -1595,6 +1633,30 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
pollRspWrapper->metaRsp.head.epoch, consumerEpoch);
taosFreeQitem(pollRspWrapper);
}
+ } else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__TAOSX_RSP) {
+ SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)rspWrapper;
+ /*atomic_sub_fetch_32(&tmq->readyRequest, 1);*/
+ int32_t consumerEpoch = atomic_load_32(&tmq->epoch);
+ if (pollRspWrapper->taosxRsp.head.epoch == consumerEpoch) {
+ SMqClientVg* pVg = pollRspWrapper->vgHandle;
+ /*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset,
+ * rspMsg->msg.rspOffset);*/
+ pVg->currentOffset = pollRspWrapper->taosxRsp.rspOffset;
+ atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
+ if (pollRspWrapper->taosxRsp.blockNum == 0) {
+ taosFreeQitem(pollRspWrapper);
+ rspWrapper = NULL;
+ continue;
+ }
+ // build rsp
+        SMqTaosxRspObj* pRsp = tmqBuildTaosxRspFromWrapper(pollRspWrapper);
+ taosFreeQitem(pollRspWrapper);
+ return pRsp;
+ } else {
+ tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d\n",
+ pollRspWrapper->taosxRsp.head.epoch, consumerEpoch);
+ taosFreeQitem(pollRspWrapper);
+ }
} else {
/*printf("handle ep rsp %d\n", rspMsg->head.mqMsgType);*/
bool reset = false;
@@ -1707,9 +1769,11 @@ tmq_res_t tmq_get_res_type(TAOS_RES* res) {
} else if (TD_RES_TMQ_META(res)) {
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DELETE) {
- return TMQ_RES_DATA;
+ return TMQ_RES_TAOSX;
}
return TMQ_RES_TABLE_META;
+ } else if (TD_RES_TMQ_TAOSX(res)) {
+ return TMQ_RES_TAOSX;
} else {
return TMQ_RES_INVALID;
}
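
With TMQ_RES_TAOSX added, a poll loop can branch on tmq_get_res_type before touching the payload. A minimal consumer-side sketch using the public C API:

    #include "taos.h" /* tmq_consumer_poll, tmq_get_res_type, taos_free_result */

    void pollOnce(tmq_t *tmq) {
      TAOS_RES *res = tmq_consumer_poll(tmq, 1000 /* ms */);
      if (res == NULL) return; /* timeout, nothing arrived */
      switch (tmq_get_res_type(res)) {
        case TMQ_RES_DATA:       /* plain data blocks */               break;
        case TMQ_RES_TABLE_META: /* meta, e.g. create/alter table */   break;
        case TMQ_RES_TAOSX:      /* data plus create-table requests */ break;
        default:                 /* TMQ_RES_INVALID */                 break;
      }
      taos_free_result(res); /* frees all variants, per the clientMain.c change above */
    }
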
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index 9ca896c9ee..dffef21ac4 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -240,6 +240,22 @@ static const SSysDbTableSchema variablesSchema[] = {
{.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
};
+static const SSysDbTableSchema topicSchema[] = {
+ {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ // TODO config
+};
+
+static const SSysDbTableSchema subscriptionSchema[] = {
+ {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+};
+
static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true},
{TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true},
@@ -260,6 +276,9 @@ static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema), true},
{TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema), true},
{TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema), true},
+ {TSDB_INS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false},
+ {TSDB_INS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema), false},
+ {TSDB_INS_TABLE_STREAMS, streamSchema, tListLen(streamSchema), false},
};
static const SSysDbTableSchema connectionsSchema[] = {
@@ -272,13 +291,6 @@ static const SSysDbTableSchema connectionsSchema[] = {
{.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
-static const SSysDbTableSchema topicSchema[] = {
- {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
- {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
- {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
- // TODO config
-};
static const SSysDbTableSchema consumerSchema[] = {
{.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
@@ -292,13 +304,6 @@ static const SSysDbTableSchema consumerSchema[] = {
{.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
-static const SSysDbTableSchema subscriptionSchema[] = {
- {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
- {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
- {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
- {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
-};
-
static const SSysDbTableSchema offsetSchema[] = {
{.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
@@ -345,13 +350,10 @@ static const SSysDbTableSchema appSchema[] = {
static const SSysTableMeta perfsMeta[] = {
{TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema), false},
{TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema), false},
- {TSDB_PERFS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false},
{TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema), false},
- {TSDB_PERFS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema), false},
// {TSDB_PERFS_TABLE_OFFSETS, offsetSchema, tListLen(offsetSchema)},
{TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema), false},
// {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema), false},
- {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema), false},
{TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema), false}};
// clang-format on
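
Moving the topic/subscription/stream schemas from perfsMeta into infosMeta means they are now served from information_schema rather than performance_schema. Assuming the usual ins_* table naming, a client reads them like any other system table:

    #include "taos.h"

    void listTopics(TAOS *conn) {
      TAOS_RES *res = taos_query(conn,
          "SELECT topic_name, db_name FROM information_schema.ins_topics");
      if (taos_errno(res) == 0) {
        TAOS_ROW row;
        while ((row = taos_fetch_row(res)) != NULL) {
          /* row[0] = topic_name, row[1] = db_name */
        }
      }
      taos_free_result(res);
    }
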
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index ee9d751555..0bab6a8611 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -427,6 +427,152 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
return 0;
}
+static int32_t taosUpdateServerCfg(SConfig *pCfg) {
+ SConfigItem *pItem;
+ ECfgSrcType stype;
+ int32_t numOfCores;
+ int64_t totalMemoryKB;
+
+ pItem = cfgGetItem(tsCfg, "numOfCores");
+ if (pItem == NULL) {
+ return -1;
+ } else {
+ stype = pItem->stype;
+ numOfCores = pItem->fval;
+ }
+
+ pItem = cfgGetItem(tsCfg, "supportVnodes");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSupportVnodes = numOfCores * 2;
+ tsNumOfSupportVnodes = TMAX(tsNumOfSupportVnodes, 2);
+ pItem->i32 = tsNumOfSupportVnodes;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfRpcThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfRpcThreads = numOfCores / 2;
+ tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4);
+ pItem->i32 = tsNumOfRpcThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfCommitThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfCommitThreads = numOfCores / 2;
+ tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
+ pItem->i32 = tsNumOfCommitThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfMnodeReadThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfMnodeReadThreads = numOfCores / 8;
+ tsNumOfMnodeReadThreads = TRANGE(tsNumOfMnodeReadThreads, 1, 4);
+ pItem->i32 = tsNumOfMnodeReadThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeQueryThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeQueryThreads = numOfCores * 2;
+ tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4);
+ pItem->i32 = tsNumOfVnodeQueryThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeStreamThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeStreamThreads = numOfCores / 4;
+ tsNumOfVnodeStreamThreads = TMAX(tsNumOfVnodeStreamThreads, 4);
+ pItem->i32 = tsNumOfVnodeStreamThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeFetchThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeFetchThreads = numOfCores / 4;
+ tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4);
+ pItem->i32 = tsNumOfVnodeFetchThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeWriteThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeWriteThreads = numOfCores;
+ tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
+ pItem->i32 = tsNumOfVnodeWriteThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeSyncThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeSyncThreads = numOfCores * 2;
+ tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16);
+ pItem->i32 = tsNumOfVnodeSyncThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeRsmaThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeRsmaThreads = numOfCores;
+ tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4);
+ pItem->i32 = tsNumOfVnodeRsmaThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfQnodeQueryThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfQnodeQueryThreads = numOfCores * 2;
+ tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
+ pItem->i32 = tsNumOfQnodeQueryThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfQnodeFetchThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfQnodeFetchThreads = numOfCores / 2;
+ tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4);
+ pItem->i32 = tsNumOfQnodeFetchThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfSnodeSharedThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSnodeSharedThreads = numOfCores / 4;
+ tsNumOfSnodeSharedThreads = TRANGE(tsNumOfSnodeSharedThreads, 2, 4);
+ pItem->i32 = tsNumOfSnodeSharedThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfSnodeUniqueThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSnodeUniqueThreads = numOfCores / 4;
+ tsNumOfSnodeUniqueThreads = TRANGE(tsNumOfSnodeUniqueThreads, 2, 4);
+ pItem->i32 = tsNumOfSnodeUniqueThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "totalMemoryKB");
+ if (pItem == NULL) {
+ return -1;
+ } else {
+ stype = pItem->stype;
+ totalMemoryKB = pItem->i64;
+ }
+
+ pItem = cfgGetItem(tsCfg, "rpcQueueMemoryAllowed");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsRpcQueueMemoryAllowed = totalMemoryKB * 1024 * 0.1;
+ tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL);
+ pItem->i64 = tsRpcQueueMemoryAllowed;
+ pItem->stype = stype;
+ }
+
+ return 0;
+}
+
static void taosSetClientLogCfg(SConfig *pCfg) {
SConfigItem *pItem = cfgGetItem(pCfg, "logDir");
tstrncpy(tsLogDir, cfgGetItem(pCfg, "logDir")->str, PATH_MAX);
@@ -981,7 +1127,7 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi
taosSetServerLogCfg(pCfg);
}
- taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32);
+ taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32, false);
if (taosMulMkDir(tsLogDir) != 0) {
uError("failed to create dir:%s since %s", tsLogDir, terrstr());
@@ -1048,6 +1194,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile
if (taosSetClientCfg(tsCfg)) return -1;
} else {
if (taosSetClientCfg(tsCfg)) return -1;
+ if (taosUpdateServerCfg(tsCfg)) return -1;
if (taosSetServerCfg(tsCfg)) return -1;
if (taosSetTfsCfg(tsCfg) != 0) return -1;
}
@@ -1072,7 +1219,7 @@ void taosCleanupCfg() {
void taosCfgDynamicOptions(const char *option, const char *value) {
if (strncasecmp(option, "debugFlag", 9) == 0) {
int32_t flag = atoi(value);
- taosSetAllDebugFlag(flag);
+ taosSetAllDebugFlag(flag, true);
return;
}
@@ -1097,11 +1244,13 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
"dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag",
"tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag",
"tmrDebugFlag", "uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag",
+ "jniDebugFlag",
};
int32_t *optionVars[] = {
&dDebugFlag, &vDebugFlag, &mDebugFlag, &wDebugFlag, &sDebugFlag, &tsdbDebugFlag,
&tqDebugFlag, &fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tdbDebugFlag,
&tmrDebugFlag, &uDebugFlag, &smaDebugFlag, &rpcDebugFlag, &qDebugFlag, &metaDebugFlag,
+ &jniDebugFlag,
};
int32_t optionSize = tListLen(options);
@@ -1113,41 +1262,42 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
int32_t flag = atoi(value);
uInfo("%s set from %d to %d", optName, *optionVars[d], flag);
*optionVars[d] = flag;
- taosSetDebugFlag(optionVars[d], optName, flag);
+ taosSetDebugFlag(optionVars[d], optName, flag, true);
return;
}
uError("failed to cfg dynamic option:%s value:%s", option, value);
}
-void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) {
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite) {
SConfigItem *pItem = cfgGetItem(tsCfg, flagName);
- if (pItem != NULL) {
+ if (pItem != NULL && (rewrite || pItem->i32 == 0)) {
pItem->i32 = flagVal;
}
*pFlagPtr = flagVal;
}
-void taosSetAllDebugFlag(int32_t flag) {
+void taosSetAllDebugFlag(int32_t flag, bool rewrite) {
if (flag <= 0) return;
- taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag);
- taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag);
- taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag);
- taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag);
- taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag);
- taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag);
- taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag);
- taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag);
- taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag);
- taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag);
- taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag);
- taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag);
- taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag);
- taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag);
- taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag);
- taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag);
- taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag);
- taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag);
+ taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&metaDebugFlag, "tmrDebugFlag", flag, rewrite);
uInfo("all debug flag are set to %d", flag);
}
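
taosUpdateServerCfg only touches items still at CFG_STYPE_DEFAULT, so values set explicitly in the config file or via command line survive; each derived value is a core-count heuristic clamped into a range. A self-contained sketch of the clamp, with TMAX/TRANGE restated locally and the per-item factors mirroring the diff:

    #include <stdint.h>

    #define TMAX(a, b)        ((a) > (b) ? (a) : (b))
    #define TRANGE(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

    /* e.g. on a 16-core box: rpc = clamp(16/2, 1, 4) = 4,
     * commit = clamp(16/2, 2, 4) = 4, vnode-query = max(16*2, 4) = 32 */
    static void deriveDefaults(int32_t numOfCores) {
      int32_t rpcThreads    = TRANGE(numOfCores / 2, 1, 4);
      int32_t commitThreads = TRANGE(numOfCores / 2, 2, 4);
      int32_t queryThreads  = TMAX(numOfCores * 2, 4);
      (void)rpcThreads; (void)commitThreads; (void)queryThreads;
    }
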
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index 618baa5b37..1921415239 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -3330,7 +3330,7 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
return 0;
}
-void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp*)pRsp)->pSchemas); }
+void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas); }
void tFreeSTableIndexRsp(void *info) {
if (NULL == info) {
@@ -5119,17 +5119,17 @@ int tDecodeSVCreateTbRsp(SDecoder *pCoder, SVCreateTbRsp *pRsp) {
} else {
pRsp->pMeta = NULL;
}
-
+
tEndDecode(pCoder);
return 0;
}
-void tFreeSVCreateTbRsp(void* param) {
+void tFreeSVCreateTbRsp(void *param) {
if (NULL == param) {
return;
}
-
- SVCreateTbRsp* pRsp = (SVCreateTbRsp*)param;
+
+ SVCreateTbRsp *pRsp = (SVCreateTbRsp *)param;
if (pRsp->pMeta) {
taosMemoryFree(pRsp->pMeta->pSchemas);
taosMemoryFree(pRsp->pMeta);
@@ -5347,7 +5347,7 @@ static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
if (tDecodeI32v(pDecoder, &pBlock->numOfRows) < 0) return -1;
if (tDecodeI32v(pDecoder, &pBlock->affectedRows) < 0) return -1;
if (tDecodeI64v(pDecoder, &pBlock->sver) < 0) return -1;
-
+
int32_t meta = 0;
if (tDecodeI32(pDecoder, &meta) < 0) return -1;
if (meta) {
@@ -5395,12 +5395,12 @@ int32_t tDecodeSSubmitRsp(SDecoder *pDecoder, SSubmitRsp *pRsp) {
return 0;
}
-void tFreeSSubmitBlkRsp(void* param) {
+void tFreeSSubmitBlkRsp(void *param) {
if (NULL == param) {
return;
}
-
- SSubmitBlkRsp* pRsp = (SSubmitBlkRsp*)param;
+
+ SSubmitBlkRsp *pRsp = (SSubmitBlkRsp *)param;
taosMemoryFree(pRsp->tblFName);
if (pRsp->pMeta) {
@@ -5409,7 +5409,6 @@ void tFreeSSubmitBlkRsp(void* param) {
}
}
-
void tFreeSSubmitRsp(SSubmitRsp *pRsp) {
if (NULL == pRsp) return;
@@ -5621,7 +5620,6 @@ void tFreeSMAlterStbRsp(SMAlterStbRsp *pRsp) {
}
}
-
int32_t tEncodeSMCreateStbRsp(SEncoder *pEncoder, const SMCreateStbRsp *pRsp) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->pMeta->pSchemas ? 1 : 0) < 0) return -1;
@@ -5673,8 +5671,6 @@ void tFreeSMCreateStbRsp(SMCreateStbRsp *pRsp) {
}
}
-
-
int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal) {
if (tEncodeI8(pEncoder, pOffsetVal->type) < 0) return -1;
if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
@@ -5692,7 +5688,7 @@ int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal)
int32_t tDecodeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffsetVal) {
if (tDecodeI8(pDecoder, &pOffsetVal->type) < 0) return -1;
- if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
+ if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
if (tDecodeI64(pDecoder, &pOffsetVal->uid) < 0) return -1;
if (tDecodeI64(pDecoder, &pOffsetVal->ts) < 0) return -1;
} else if (pOffsetVal->type == TMQ_OFFSET__LOG) {
@@ -5714,7 +5710,7 @@ int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) {
snprintf(buf, maxLen, "offset(reset to latest)");
} else if (pVal->type == TMQ_OFFSET__LOG) {
snprintf(buf, maxLen, "offset(log) ver:%" PRId64, pVal->version);
- } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
+ } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
snprintf(buf, maxLen, "offset(ss data) uid:%" PRId64 ", ts:%" PRId64, pVal->uid, pVal->ts);
} else {
ASSERT(0);
@@ -5815,17 +5811,17 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) {
return 0;
}
-int32_t tEncodeSMqMetaRsp(SEncoder* pEncoder, const SMqMetaRsp* pRsp) {
+int32_t tEncodeSMqMetaRsp(SEncoder *pEncoder, const SMqMetaRsp *pRsp) {
if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
- if(tEncodeI16(pEncoder, pRsp->resMsgType)) return -1;
- if(tEncodeBinary(pEncoder, pRsp->metaRsp, pRsp->metaRspLen)) return -1;
+ if (tEncodeI16(pEncoder, pRsp->resMsgType)) return -1;
+ if (tEncodeBinary(pEncoder, pRsp->metaRsp, pRsp->metaRspLen)) return -1;
return 0;
}
-int32_t tDecodeSMqMetaRsp(SDecoder* pDecoder, SMqMetaRsp* pRsp) {
+int32_t tDecodeSMqMetaRsp(SDecoder *pDecoder, SMqMetaRsp *pRsp) {
if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
if (tDecodeI16(pDecoder, &pRsp->resMsgType) < 0) return -1;
- if (tDecodeBinaryAlloc(pDecoder, &pRsp->metaRsp, (uint64_t*)&pRsp->metaRspLen) < 0) return -1;
+ if (tDecodeBinaryAlloc(pDecoder, &pRsp->metaRsp, (uint64_t *)&pRsp->metaRspLen) < 0) return -1;
return 0;
}
@@ -5895,6 +5891,99 @@ int32_t tDecodeSMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) {
return 0;
}
+void tDeleteSMqDataRsp(SMqDataRsp *pRsp) {
+ taosArrayDestroy(pRsp->blockDataLen);
+ taosArrayDestroyP(pRsp->blockData, (FDelete)taosMemoryFree);
+ taosArrayDestroyP(pRsp->blockSchema, (FDelete)tDeleteSSchemaWrapper);
+ taosArrayDestroyP(pRsp->blockTbName, (FDelete)taosMemoryFree);
+}
+
+int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) {
+ if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1;
+ if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
+ if (tEncodeI32(pEncoder, pRsp->blockNum) < 0) return -1;
+ if (pRsp->blockNum != 0) {
+ if (tEncodeI8(pEncoder, pRsp->withTbName) < 0) return -1;
+ if (tEncodeI8(pEncoder, pRsp->withSchema) < 0) return -1;
+
+ for (int32_t i = 0; i < pRsp->blockNum; i++) {
+ int32_t bLen = *(int32_t *)taosArrayGet(pRsp->blockDataLen, i);
+ void *data = taosArrayGetP(pRsp->blockData, i);
+ if (tEncodeBinary(pEncoder, (const uint8_t *)data, bLen) < 0) return -1;
+ if (pRsp->withSchema) {
+ SSchemaWrapper *pSW = (SSchemaWrapper *)taosArrayGetP(pRsp->blockSchema, i);
+ if (tEncodeSSchemaWrapper(pEncoder, pSW) < 0) return -1;
+ }
+ if (pRsp->withTbName) {
+ char *tbName = (char *)taosArrayGetP(pRsp->blockTbName, i);
+ if (tEncodeCStr(pEncoder, tbName) < 0) return -1;
+ }
+ }
+ }
+ if (tEncodeI32(pEncoder, pRsp->createTableNum) < 0) return -1;
+ if (pRsp->createTableNum) {
+ for (int32_t i = 0; i < pRsp->createTableNum; i++) {
+ void *createTableReq = taosArrayGetP(pRsp->createTableReq, i);
+ int32_t createTableLen = *(int32_t *)taosArrayGet(pRsp->createTableLen, i);
+ if (tEncodeBinary(pEncoder, createTableReq, createTableLen) < 0) return -1;
+ }
+ }
+ return 0;
+}
+
+int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, STaosxRsp *pRsp) {
+ if (tDecodeSTqOffsetVal(pDecoder, &pRsp->reqOffset) < 0) return -1;
+ if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pRsp->blockNum) < 0) return -1;
+ if (pRsp->blockNum != 0) {
+ pRsp->blockData = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ pRsp->blockDataLen = taosArrayInit(pRsp->blockNum, sizeof(int32_t));
+ if (tDecodeI8(pDecoder, &pRsp->withTbName) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pRsp->withSchema) < 0) return -1;
+ if (pRsp->withTbName) {
+ pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ }
+ if (pRsp->withSchema) {
+ pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ }
+
+ for (int32_t i = 0; i < pRsp->blockNum; i++) {
+ void *data;
+ uint64_t bLen;
+ if (tDecodeBinaryAlloc(pDecoder, &data, &bLen) < 0) return -1;
+ taosArrayPush(pRsp->blockData, &data);
+ int32_t len = bLen;
+ taosArrayPush(pRsp->blockDataLen, &len);
+
+ if (pRsp->withSchema) {
+ SSchemaWrapper *pSW = (SSchemaWrapper *)taosMemoryCalloc(1, sizeof(SSchemaWrapper));
+ if (pSW == NULL) return -1;
+ if (tDecodeSSchemaWrapper(pDecoder, pSW) < 0) return -1;
+ taosArrayPush(pRsp->blockSchema, &pSW);
+ }
+
+ if (pRsp->withTbName) {
+ char *tbName;
+ if (tDecodeCStrAlloc(pDecoder, &tbName) < 0) return -1;
+ taosArrayPush(pRsp->blockTbName, &tbName);
+ }
+ }
+ }
+ if (tDecodeI32(pDecoder, &pRsp->createTableNum) < 0) return -1;
+ if (pRsp->createTableNum) {
+ pRsp->createTableLen = taosArrayInit(pRsp->createTableNum, sizeof(int32_t));
+ pRsp->createTableReq = taosArrayInit(pRsp->createTableNum, sizeof(void *));
+ for (int32_t i = 0; i < pRsp->createTableNum; i++) {
+ void *pCreate = NULL;
+ uint64_t len;
+ if (tDecodeBinaryAlloc(pDecoder, &pCreate, &len) < 0) return -1;
+ int32_t l = (int32_t)len;
+ taosArrayPush(pRsp->createTableLen, &l);
+ taosArrayPush(pRsp->createTableReq, &pCreate);
+ }
+ }
+ return 0;
+}
int32_t tEncodeSSingleDeleteReq(SEncoder *pEncoder, const SSingleDeleteReq *pReq) {
if (tEncodeI64(pEncoder, pReq->uid) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->ts) < 0) return -1;
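
tDecodeSTaosxRsp allocates every block, schema, table name, and create-table request it decodes, so each successful decode needs a matching teardown; tDeleteSMqDataRsp (added above) covers the shared arrays, and the taosx-only arrays are freed the same way the clientMain.c change does. A rough lifecycle sketch, assuming the internal tmsg/tencode headers:

    STaosxRsp rsp = {0};
    SDecoder  dc  = {0};
    tDecoderInit(&dc, payload, payloadLen); /* payload: the bytes after SMqRspHead */
    if (tDecodeSTaosxRsp(&dc, &rsp) < 0) { /* handle decode failure */ }
    tDecoderClear(&dc);
    /* ... consume rsp.blockData / rsp.createTableReq ... */
    taosArrayDestroyP(rsp.blockData, (FDelete)taosMemoryFree);
    taosArrayDestroy(rsp.blockDataLen);
    taosArrayDestroyP(rsp.blockTbName, (FDelete)taosMemoryFree);
    taosArrayDestroyP(rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
    taosArrayDestroy(rsp.createTableLen);
    taosArrayDestroyP(rsp.createTableReq, (FDelete)taosMemoryFree);
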
diff --git a/source/common/src/tvariant.c b/source/common/src/tvariant.c
index 0810be1497..a01c393441 100644
--- a/source/common/src/tvariant.c
+++ b/source/common/src/tvariant.c
@@ -155,8 +155,8 @@ void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uin
void taosVariantDestroy(SVariant *pVar) {
if (pVar == NULL) return;
- if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR
- || pVar->nType == TSDB_DATA_TYPE_JSON) {
+ if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR ||
+ pVar->nType == TSDB_DATA_TYPE_JSON) {
taosMemoryFreeClear(pVar->pz);
pVar->nLen = 0;
}
@@ -185,8 +185,8 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc) {
if (pSrc == NULL || pDst == NULL) return;
pDst->nType = pSrc->nType;
- if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR
- || pSrc->nType == TSDB_DATA_TYPE_JSON) {
+ if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR ||
+ pSrc->nType == TSDB_DATA_TYPE_JSON) {
int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE;
char *p = taosMemoryRealloc(pDst->pz, len);
assert(p);
diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
index a059db6b00..b91b82b72e 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
@@ -301,7 +301,7 @@ int32_t dmInitServer(SDnode *pDnode) {
SDnodeTrans *pTrans = &pDnode->trans;
SRpcInit rpcInit = {0};
- strncpy(rpcInit.localFqdn, tsLocalFqdn, strlen(tsLocalFqdn));
+ strncpy(rpcInit.localFqdn, tsLocalFqdn, TSDB_FQDN_LEN);
rpcInit.localPort = tsServerPort;
rpcInit.label = "DND-S";
rpcInit.numOfThreads = tsNumOfRpcThreads;
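
Bounding strncpy by strlen(src) defeats its purpose: the copy can overrun a smaller destination and termination is never guaranteed. The fix bounds by the destination capacity instead; a self-contained sketch of the shape (forcing the terminator is a further hardening the diff does not add, and the length constant here is illustrative):

    #include <string.h>
    #define FQDN_CAP 128 /* assumption: stands in for TSDB_FQDN_LEN */

    void setFqdn(char dst[FQDN_CAP], const char *src) {
      strncpy(dst, src, FQDN_CAP);
      dst[FQDN_CAP - 1] = '\0'; /* strncpy does not terminate when src fills the buffer */
    }
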
diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c
index 9499c90c57..5a998dfe98 100644
--- a/source/dnode/mnode/impl/src/mndShow.c
+++ b/source/dnode/mnode/impl/src/mndShow.c
@@ -88,7 +88,7 @@ static int32_t convertToRetrieveType(char *name, int32_t len) {
type = TSDB_MGMT_TABLE_VGROUP;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_CONSUMERS, len) == 0) {
type = TSDB_MGMT_TABLE_CONSUMERS;
- } else if (strncasecmp(name, TSDB_PERFS_TABLE_SUBSCRIPTIONS, len) == 0) {
+ } else if (strncasecmp(name, TSDB_INS_TABLE_SUBSCRIPTIONS, len) == 0) {
type = TSDB_MGMT_TABLE_SUBSCRIPTIONS;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_TRANS, len) == 0) {
type = TSDB_MGMT_TABLE_TRANS;
@@ -102,9 +102,9 @@ static int32_t convertToRetrieveType(char *name, int32_t len) {
type = TSDB_MGMT_TABLE_QUERIES;
} else if (strncasecmp(name, TSDB_INS_TABLE_VNODES, len) == 0) {
type = TSDB_MGMT_TABLE_VNODES;
- } else if (strncasecmp(name, TSDB_PERFS_TABLE_TOPICS, len) == 0) {
+ } else if (strncasecmp(name, TSDB_INS_TABLE_TOPICS, len) == 0) {
type = TSDB_MGMT_TABLE_TOPICS;
- } else if (strncasecmp(name, TSDB_PERFS_TABLE_STREAMS, len) == 0) {
+ } else if (strncasecmp(name, TSDB_INS_TABLE_STREAMS, len) == 0) {
type = TSDB_MGMT_TABLE_STREAMS;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_APPS, len) == 0) {
type = TSDB_MGMT_TABLE_APPS;
diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c
index 10e520d9ec..1452c5ae2f 100644
--- a/source/dnode/mnode/impl/src/mndSubscribe.c
+++ b/source/dnode/mnode/impl/src/mndSubscribe.c
@@ -287,6 +287,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
if (consumerVgNum > minVgCnt) {
if (imbCnt < imbConsumerNum) {
if (consumerVgNum == minVgCnt + 1) {
+ imbCnt++;
continue;
} else {
// pop until equal minVg + 1
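
The missing imbCnt++ let every over-provisioned consumer keep minVgCnt + 1 vgroups, not just the remainder's worth. The invariant, in the terms mndDoRebalance uses: with totalVg vgroups and consumerNum consumers, exactly totalVg % consumerNum consumers may hold one extra. A small sketch under those assumptions:

    #include <stdint.h>

    void shareOut(int32_t totalVg, int32_t consumerNum) {
      int32_t minVgCnt       = totalVg / consumerNum; /* floor share */
      int32_t imbConsumerNum = totalVg % consumerNum; /* quota of minVgCnt + 1 holders */
      int32_t imbCnt         = 0;
      /* each consumer kept at minVgCnt + 1 must consume the quota:
       * only continue while imbCnt++ < imbConsumerNum, which the fix restores */
      (void)minVgCnt; (void)imbCnt;
    }
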
diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c
index ff208eae60..eb072d013d 100644
--- a/source/dnode/mnode/impl/src/mndTopic.c
+++ b/source/dnode/mnode/impl/src/mndTopic.c
@@ -763,8 +763,9 @@ static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
int32_t cols = 0;
char topicName[TSDB_TOPIC_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
- tNameFromString(&n, pTopic->name, T_NAME_ACCT | T_NAME_DB);
- tNameGetDbName(&n, varDataVal(topicName));
+ strcpy(varDataVal(topicName), mndGetDbStr(pTopic->name));
+ /*tNameFromString(&n, pTopic->name, T_NAME_ACCT | T_NAME_DB);*/
+ /*tNameGetDbName(&n, varDataVal(topicName));*/
varDataSetLen(topicName, strlen(varDataVal(topicName)));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)topicName, false);
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index 3a3cbe72ba..5d4285b7c2 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -157,17 +157,17 @@ void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity);
size_t tsdbCacheGetCapacity(SVnode *pVnode);
// tq
-typedef struct SMetaTableInfo{
+typedef struct SMetaTableInfo {
int64_t suid;
int64_t uid;
SSchemaWrapper *schema;
char tbName[TSDB_TABLE_NAME_LEN];
-}SMetaTableInfo;
+} SMetaTableInfo;
-typedef struct SIdInfo{
- int64_t version;
- int32_t index;
-}SIdInfo;
+typedef struct SIdInfo {
+ int64_t version;
+ int32_t index;
+} SIdInfo;
typedef struct SSnapContext {
SMeta *pMeta;
@@ -180,8 +180,8 @@ typedef struct SSnapContext {
SArray *idList;
int32_t index;
bool withMeta;
- bool queryMetaOrData; // true-get meta, false-get data
-}SSnapContext;
+ bool queryMetaOrData; // true-get meta, false-get data
+} SSnapContext;
typedef struct STqReader {
int64_t ver;
@@ -232,11 +232,12 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWr
int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *pSnapshot);
int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData);
-int32_t buildSnapContext(SMeta* pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta, SSnapContext** ctxRet);
-int32_t getMetafromSnapShot(SSnapContext* ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid);
-SMetaTableInfo getUidfromSnapShot(SSnapContext* ctx);
-int32_t setForSnapShot(SSnapContext* ctx, int64_t uid);
-int32_t destroySnapContext(SSnapContext* ctx);
+int32_t buildSnapContext(SMeta *pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta,
+ SSnapContext **ctxRet);
+int32_t getMetafromSnapShot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid);
+SMetaTableInfo getUidfromSnapShot(SSnapContext *ctx);
+int32_t setForSnapShot(SSnapContext *ctx, int64_t uid);
+int32_t destroySnapContext(SSnapContext *ctx);
// structs
struct STsdbCfg {
@@ -259,6 +260,7 @@ typedef struct {
int64_t numOfNTables;
int64_t numOfNTimeSeries;
int64_t numOfTimeSeries;
+ int64_t itvTimeSeries;
int64_t pointsWritten;
int64_t totalStorage;
int64_t compStorage;
diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h
index abfffc045f..c29c4cb6c4 100644
--- a/source/dnode/vnode/src/inc/sma.h
+++ b/source/dnode/vnode/src/inc/sma.h
@@ -95,6 +95,7 @@ struct SRSmaStat {
int64_t refId; // shared by fetch tasks
volatile int64_t nBufItems; // number of items in queue buffer
SRWLatch lock; // r/w lock for rsma fs(e.g. qtaskinfo)
+ volatile int32_t nFetchAll; // active number of fetch all
int8_t triggerStat; // shared by fetch tasks
int8_t commitStat; // 0 not in committing, 1 in committing
SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w)
diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h
index a97c8ff132..19dd321814 100644
--- a/source/dnode/vnode/src/inc/tq.h
+++ b/source/dnode/vnode/src/inc/tq.h
@@ -67,21 +67,21 @@ typedef struct {
// tqExec
typedef struct {
- char* qmsg;
+ char* qmsg;
} STqExecCol;
typedef struct {
- int64_t suid;
+ int64_t suid;
} STqExecTb;
typedef struct {
- SHashObj* pFilterOutTbUid;
+ SHashObj* pFilterOutTbUid;
} STqExecDb;
typedef struct {
int8_t subType;
- STqReader* pExecReader;
+ STqReader* pExecReader;
qTaskInfo_t task;
union {
STqExecCol execCol;
@@ -144,7 +144,7 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp*
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum);
// tqExec
-int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp);
+int32_t tqLogScanExec(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, SMqDataRsp* pRsp);
int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp);
// tqMeta
@@ -175,22 +175,6 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
char* tqOffsetBuildFName(const char* path, int32_t ver);
int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname);
-static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) {
- pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA;
- pOffsetVal->uid = uid;
- pOffsetVal->ts = ts;
-}
-
-static FORCE_INLINE void tqOffsetResetToMeta(STqOffsetVal* pOffsetVal, int64_t uid) {
- pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_META;
- pOffsetVal->uid = uid;
-}
-
-static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ver) {
- pOffsetVal->type = TMQ_OFFSET__LOG;
- pOffsetVal->version = ver;
-}
-
// tqStream
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask);
diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c
index 9d3b4d82eb..7df355a59b 100644
--- a/source/dnode/vnode/src/meta/metaQuery.c
+++ b/source/dnode/vnode/src/meta/metaQuery.c
@@ -615,9 +615,13 @@ int64_t metaGetTbNum(SMeta *pMeta) {
// N.B. Called by statusReq per second
int64_t metaGetTimeSeriesNum(SMeta *pMeta) {
// sum of (number of columns of stable - 1) * number of ctables (excluding timestamp column)
- int64_t num = 0;
- vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
- pMeta->pVnode->config.vndStats.numOfTimeSeries = num;
+ if (pMeta->pVnode->config.vndStats.numOfTimeSeries <= 0 || ++pMeta->pVnode->config.vndStats.itvTimeSeries % 60 == 0) {
+ int64_t num = 0;
+ vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
+ pMeta->pVnode->config.vndStats.numOfTimeSeries = num;
+
+ pMeta->pVnode->config.vndStats.itvTimeSeries = 0;
+ }
return pMeta->pVnode->config.vndStats.numOfTimeSeries + pMeta->pVnode->config.vndStats.numOfNTimeSeries;
}
@@ -890,7 +894,7 @@ const void *metaGetTableTagVal(void *pTag, int16_t type, STagVal *val) {
#ifdef TAG_FILTER_DEBUG
if (IS_VAR_DATA_TYPE(val->type)) {
- char* buf = taosMemoryCalloc(val->nData + 1, 1);
+ char *buf = taosMemoryCalloc(val->nData + 1, 1);
memcpy(buf, val->pData, val->nData);
metaDebug("metaTag table val varchar index:%d cid:%d type:%d value:%s", 1, val->cid, val->type, buf);
taosMemoryFree(buf);
@@ -900,13 +904,13 @@ const void *metaGetTableTagVal(void *pTag, int16_t type, STagVal *val) {
metaDebug("metaTag table val number index:%d cid:%d type:%d value:%f", 1, val->cid, val->type, dval);
}
- SArray* pTagVals = NULL;
- tTagToValArray((STag*)pTag, &pTagVals);
+ SArray *pTagVals = NULL;
+ tTagToValArray((STag *)pTag, &pTagVals);
for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
- STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, i);
+ STagVal *pTagVal = (STagVal *)taosArrayGet(pTagVals, i);
if (IS_VAR_DATA_TYPE(pTagVal->type)) {
- char* buf = taosMemoryCalloc(pTagVal->nData + 1, 1);
+ char *buf = taosMemoryCalloc(pTagVal->nData + 1, 1);
memcpy(buf, pTagVal->pData, pTagVal->nData);
metaDebug("metaTag table varchar index:%d cid:%d type:%d value:%s", i, pTagVal->cid, pTagVal->type, buf);
taosMemoryFree(buf);
diff --git a/source/dnode/vnode/src/sma/smaCommit.c b/source/dnode/vnode/src/sma/smaCommit.c
index ca5367f397..3cf50a035a 100644
--- a/source/dnode/vnode/src/sma/smaCommit.c
+++ b/source/dnode/vnode/src/sma/smaCommit.c
@@ -172,7 +172,7 @@ static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) {
TdDirPtr pDir = NULL;
TdDirEntryPtr pDirEntry = NULL;
char dir[TSDB_FILENAME_LEN];
- const char *pattern = "v[0-9]+qtaskinfo\\.ver([0-9]+)?$";
+ const char *pattern = "v[0-9]+qinf\\.v([0-9]+)?$";
regex_t regex;
int code = 0;
@@ -312,15 +312,22 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
SSmaStat *pStat = SMA_ENV_STAT(pEnv);
SRSmaStat *pRSmaStat = SMA_STAT_RSMA(pStat);
+ int32_t nLoops = 0;
// step 1: set rsma stat
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
- atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 1);
+ while (atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 1) != 0) {
+ ++nLoops;
+ if (nLoops > 1000) {
+ sched_yield();
+ nLoops = 0;
+ }
+ }
pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
ASSERT(pRSmaStat->commitAppliedVer > 0);
// step 2: wait for all triggered fetch tasks to finish
- int32_t nLoops = 0;
+ nLoops = 0;
while (1) {
if (T_REF_VAL_GET(pStat) == 0) {
smaDebug("vgId:%d, rsma commit, fetch tasks are all finished", SMA_VID(pSma));
@@ -344,7 +351,8 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
return TSDB_CODE_FAILED;
}
- smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma), (void*)taosGetSelfPthreadId());
+ smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma),
+ (void *)taosGetSelfPthreadId());
nLoops = 0;
while (atomic_load_64(&pRSmaStat->nBufItems) > 0) {
++nLoops;
@@ -359,7 +367,7 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
}
smaInfo("vgId:%d, rsma commit, operator state commited, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
-#if 0 // consuming task of qTaskInfo clone
+#if 0 // consuming task of qTaskInfo clone
// step 4: swap queue/qall and iQueue/iQall
// lock
// taosWLockLatch(SMA_ENV_LOCK(pEnv));
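
commitStat now acts as a small state machine (0 = idle, 1 = committing, 2 = fetch-all in progress): the pre-commit path spins on CAS(0 -> 1) here, while the exec path (smaRollup.c below) takes it with CAS(0 -> 2) and clears it when the last concurrent fetch-all drops nFetchAll to zero. A self-contained sketch of the same acquire loop using C11 atomics; the real build uses TDengine's own atomic_val_compare_exchange_8 wrappers:

    #include <sched.h>
    #include <stdatomic.h>
    #include <stdint.h>

    static atomic_schar commitStat; /* 0 idle, 1 committing, 2 fetching */

    static void acquireForCommit(void) {
      int32_t nLoops = 0;
      for (;;) {
        signed char expect = 0;
        if (atomic_compare_exchange_weak(&commitStat, &expect, 1)) break;
        if (++nLoops > 1000) { /* back off so the fetch path can drain */
          sched_yield();
          nLoops = 0;
        }
      }
    }
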
diff --git a/source/dnode/vnode/src/sma/smaOpen.c b/source/dnode/vnode/src/sma/smaOpen.c
index 235fb1f941..3c3097bb2f 100644
--- a/source/dnode/vnode/src/sma/smaOpen.c
+++ b/source/dnode/vnode/src/sma/smaOpen.c
@@ -16,17 +16,17 @@
#include "sma.h"
#include "tsdb.h"
-static int32_t smaEvalDays(SRetention *r, int8_t precision);
-static int32_t smaSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type);
+static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t precision, int32_t duration);
+static int32_t smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type);
static int32_t rsmaRestore(SSma *pSma);
-#define SMA_SET_KEEP_CFG(l) \
+#define SMA_SET_KEEP_CFG(v, l) \
do { \
SRetention *r = &pCfg->retentions[l]; \
pKeepCfg->keep2 = convertTimeFromPrecisionToUnit(r->keep, pCfg->precision, TIME_UNIT_MINUTE); \
pKeepCfg->keep0 = pKeepCfg->keep2; \
pKeepCfg->keep1 = pKeepCfg->keep2; \
- pKeepCfg->days = smaEvalDays(r, pCfg->precision); \
+ pKeepCfg->days = smaEvalDays(v, pCfg->retentions, l, pCfg->precision, pCfg->days); \
} while (0)
#define SMA_OPEN_RSMA_IMPL(v, l) \
@@ -38,51 +38,78 @@ static int32_t rsmaRestore(SSma *pSma);
} \
break; \
} \
- smaSetKeepCfg(&keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \
+ smaSetKeepCfg(v, &keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \
if (tsdbOpen(v, &SMA_RSMA_TSDB##l(pSma), VNODE_RSMA##l##_DIR, &keepCfg) < 0) { \
goto _err; \
} \
} while (0)
-#define RETENTION_DAYS_SPLIT_RATIO 10
-#define RETENTION_DAYS_SPLIT_MIN 1
-#define RETENTION_DAYS_SPLIT_MAX 30
+/**
+ * @brief Evaluate days(duration) for rsma level 1/2/3.
+ * 1) level 1: duration from "create database"
+ * 2) level 2/3: duration * (freq/freqL1)
+ * @param pVnode
+ * @param r
+ * @param level
+ * @param precision
+ * @param duration
+ * @return int32_t
+ */
+static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t precision, int32_t duration) {
+ int32_t freqDuration = convertTimeFromPrecisionToUnit((r + TSDB_RETENTION_L0)->freq, precision, TIME_UNIT_MINUTE);
+ int32_t keepDuration = convertTimeFromPrecisionToUnit((r + TSDB_RETENTION_L0)->keep, precision, TIME_UNIT_MINUTE);
+ int32_t days = duration; // min
-static int32_t smaEvalDays(SRetention *r, int8_t precision) {
- int32_t keepDays = convertTimeFromPrecisionToUnit(r->keep, precision, TIME_UNIT_DAY);
- int32_t freqDays = convertTimeFromPrecisionToUnit(r->freq, precision, TIME_UNIT_DAY);
-
- int32_t days = keepDays / RETENTION_DAYS_SPLIT_RATIO;
- if (days <= RETENTION_DAYS_SPLIT_MIN) {
- days = RETENTION_DAYS_SPLIT_MIN;
- if (days < freqDays) {
- days = freqDays + 1;
- }
- } else {
- if (days > RETENTION_DAYS_SPLIT_MAX) {
- days = RETENTION_DAYS_SPLIT_MAX;
- }
- if (days < freqDays) {
- days = freqDays + 1;
- }
+ if (days < freqDuration) {
+ days = freqDuration;
}
- return days * 1440;
+
+ if (days > keepDuration) {
+ days = keepDuration;
+ }
+
+ if (level == TSDB_RETENTION_L0) {
+ goto end;
+ }
+
+ ASSERT(level >= TSDB_RETENTION_L1 && level <= TSDB_RETENTION_L2);
+
+ freqDuration = convertTimeFromPrecisionToUnit((r + level)->freq, precision, TIME_UNIT_MINUTE);
+ keepDuration = convertTimeFromPrecisionToUnit((r + level)->keep, precision, TIME_UNIT_MINUTE);
+
+ int32_t nFreqTimes = (r + level)->freq / (r + TSDB_RETENTION_L0)->freq;
+ days *= (nFreqTimes > 1 ? nFreqTimes : 1);
+
+ if (days > keepDuration) {
+ days = keepDuration;
+ }
+
+ if (days > TSDB_MAX_DURATION_PER_FILE) {
+ days = TSDB_MAX_DURATION_PER_FILE;
+ }
+
+ if (days < freqDuration) {
+ days = freqDuration;
+ }
+end:
+ smaInfo("vgId:%d, evaluated duration for level %" PRIi8 " is %d, raw val:%d", TD_VID(pVnode), level + 1, days, duration);
+ return days;
}
-int smaSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type) {
+int smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type) {
pKeepCfg->precision = pCfg->precision;
switch (type) {
case TSDB_TYPE_TSMA:
ASSERT(0);
break;
case TSDB_TYPE_RSMA_L0:
- SMA_SET_KEEP_CFG(0);
+ SMA_SET_KEEP_CFG(pVnode, 0);
break;
case TSDB_TYPE_RSMA_L1:
- SMA_SET_KEEP_CFG(1);
+ SMA_SET_KEEP_CFG(pVnode, 1);
break;
case TSDB_TYPE_RSMA_L2:
- SMA_SET_KEEP_CFG(2);
+ SMA_SET_KEEP_CFG(pVnode, 2);
break;
default:
ASSERT(0);
@@ -148,11 +175,11 @@ int32_t smaClose(SSma *pSma) {
/**
* @brief rsma env restore
- *
- * @param pSma
- * @param type
- * @param committedVer
- * @return int32_t
+ *
+ * @param pSma
+ * @param type
+ * @param committedVer
+ * @return int32_t
*/
int32_t tdRsmaRestore(SSma *pSma, int8_t type, int64_t committedVer) {
ASSERT(VND_IS_RSMA(pSma->pVnode));
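
A worked case for the new duration rule, with quantities in minutes as convertTimeFromPrecisionToUnit(..., TIME_UNIT_MINUTE) produces: with database duration 1440 (one day), L0 freq 10, and an L2 level with freq 60 and keep 14400, nFreqTimes = 60 / 10 = 6, so days = 1440 * 6 = 8640, which sits under both keep and the per-file ceiling and above the level's freq floor, so L2 files span six days. In sketch form (the per-file ceiling constant is an illustrative stand-in for TSDB_MAX_DURATION_PER_FILE):

    #include <stdint.h>

    #define MAX_DURATION_PER_FILE_MIN (3650 * 1440) /* assumption: per-file ceiling, minutes */

    /* all quantities in minutes */
    int32_t evalLevelDays(int32_t duration, int32_t l0Freq, int32_t lvlFreq, int32_t lvlKeep) {
      int32_t nFreqTimes = lvlFreq / l0Freq;                       /* 60 / 10 = 6 */
      int32_t days = duration * (nFreqTimes > 1 ? nFreqTimes : 1); /* 1440 * 6 = 8640 */
      if (days > lvlKeep) days = lvlKeep;                          /* clamp to retention */
      if (days > MAX_DURATION_PER_FILE_MIN) days = MAX_DURATION_PER_FILE_MIN;
      if (days < lvlFreq) days = lvlFreq;                          /* at least one rollup period */
      return days;
    }
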
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
index 426ab521fd..f2063e3067 100644
--- a/source/dnode/vnode/src/sma/smaRollup.c
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -21,17 +21,17 @@
#define RSMA_SUBMIT_BATCH_SIZE (1024) // cnt
#define RSMA_FETCH_DELAY_MAX (900000) // ms
#define RSMA_FETCH_ACTIVE_MAX (1800) // ms
+#define RSMA_FETCH_INTERVAL (5000) // ms
SSmaMgmt smaMgmt = {
.inited = 0,
.rsetId = -1,
};
-#define TD_QTASKINFO_FNAME_PREFIX "qtaskinfo.ver"
-#define TD_RSMAINFO_DEL_FILE "rsmainfo.del"
+#define TD_QTASKINFO_FNAME_PREFIX "qinf.v"
+
typedef struct SRSmaQTaskInfoItem SRSmaQTaskInfoItem;
typedef struct SRSmaQTaskInfoIter SRSmaQTaskInfoIter;
-typedef struct SRSmaExecQItem SRSmaExecQItem;
static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid);
static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids);
@@ -82,11 +82,6 @@ struct SRSmaQTaskInfoIter {
int32_t nBufPos;
};
-struct SRSmaExecQItem {
- void *pRSmaInfo;
- void *qall;
-};
-
void tdRSmaQTaskInfoGetFileName(int32_t vgId, int64_t version, char *outputName) {
tdGetVndFileName(vgId, NULL, VNODE_RSMA_DIR, TD_QTASKINFO_FNAME_PREFIX, version, outputName);
}
@@ -1083,9 +1078,6 @@ static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int8_t type, int64_t qTa
goto _err;
}
- SSmaEnv *pRSmaEnv = pSma->pRSmaEnv;
- SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pRSmaEnv);
-
SRSmaQTaskInfoIter fIter = {0};
if (tdRSmaQTaskInfoIterInit(&fIter, &tFile) < 0) {
tdRSmaQTaskInfoIterDestroy(&fIter);
@@ -1501,13 +1493,13 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
switch (rsmaTriggerStat) {
case TASK_TRIGGER_STAT_PAUSED:
case TASK_TRIGGER_STAT_CANCELLED: {
- tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
smaDebug("vgId:%d, rsma fetch task not start for level %" PRIi8 " since stat is %" PRIi8
", rsetId rsetId:%" PRIi64 " refId:%d",
SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaInfo->refId);
if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) {
- taosTmrReset(tdRSmaFetchTrigger, 5000, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
+ taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
}
+ tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
return;
}
default:
@@ -1518,7 +1510,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
atomic_val_compare_exchange_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE, TASK_TRIGGER_STAT_INACTIVE);
switch (fetchTriggerStat) {
case TASK_TRIGGER_STAT_ACTIVE: {
- smaDebug("vgId:%d, rsma fetch task started for level:%" PRIi8 " suid:%" PRIi64 " since stat is active",
+ smaDebug("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64 " since stat is active",
SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
// async process
pItem->fetchLevel = pItem->level;
@@ -1531,8 +1523,6 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
if (atomic_load_8(&pRSmaInfo->assigned) == 0) {
tsem_post(&(pStat->notEmpty));
}
- smaInfo("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64, SMA_VID(pSma), pItem->level,
- pRSmaInfo->suid);
} break;
case TASK_TRIGGER_STAT_PAUSED: {
smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is paused",
@@ -1715,21 +1705,36 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
smaDebug("vgId:%d, batchSize:%d, execType:%" PRIi8, SMA_VID(pSma), qallItemSize, type);
}
- if (type == RSMA_EXEC_OVERFLOW) {
+ int8_t oldStat = atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 2);
+ if (oldStat == 0 ||
+ ((oldStat == 2) && atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat)) < TASK_TRIGGER_STAT_PAUSED)) {
+ atomic_fetch_add_32(&pRSmaStat->nFetchAll, 1);
tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
+ if (0 == atomic_sub_fetch_32(&pRSmaStat->nFetchAll, 1)) {
+ atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 0);
+ }
}
if (qallItemSize > 0) {
atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize);
continue;
} else if (RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) {
- continue;
+ if (atomic_load_8(RSMA_COMMIT_STAT(pRSmaStat)) == 0) {
+ continue;
+ }
+ for (int32_t j = 0; j < TSDB_RETENTION_L2; ++j) {
+ SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, j);
+ if (pItem->fetchLevel) {
+ pItem->fetchLevel = 0;
+ taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
+ }
+ }
}
break;
}
}
- ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0));
+ atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0);
}
}
if (type == RSMA_EXEC_COMMIT) {
@@ -1758,7 +1763,7 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
}
// tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
- ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0));
+ atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0);
}
}
ASSERT(taosQueueItemSize(pInfo->iQueue) == 0);
@@ -1775,7 +1780,7 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
if (pEnv->flag & SMA_ENV_FLG_CLOSE) {
break;
}
-
+
tsem_wait(&pRSmaStat->notEmpty);
if ((pEnv->flag & SMA_ENV_FLG_CLOSE) && (atomic_load_64(&pRSmaStat->nBufItems) <= 0)) {
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index 54f764c6b3..eed997b486 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -357,8 +357,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
TD_VID(pTq->pVnode), formatBuf);
} else {
if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
- if (pReq->useSnapshot){
- if (pHandle->fetchMeta){
+ if (pReq->useSnapshot) {
+ if (pHandle->fetchMeta) {
tqOffsetResetToMeta(&fetchOffsetNew, 0);
} else {
tqOffsetResetToData(&fetchOffsetNew, 0, 0);
@@ -373,43 +373,47 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
code = -1;
}
- goto OVER;
+ tDeleteSMqDataRsp(&dataRsp);
+ return code;
} else if (reqOffset.type == TMQ_OFFSET__RESET_NONE) {
tqError("tmq poll: subkey %s, no offset committed for consumer %" PRId64
" in vg %d, subkey %s, reset none failed",
pHandle->subKey, consumerId, TD_VID(pTq->pVnode), pReq->subKey);
terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET;
code = -1;
- goto OVER;
+ tDeleteSMqDataRsp(&dataRsp);
+ return code;
}
}
}
- if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN || fetchOffsetNew.type != TMQ_OFFSET__LOG){
+ if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN || fetchOffsetNew.type != TMQ_OFFSET__LOG) {
SMqMetaRsp metaRsp = {0};
tqScan(pTq, pHandle, &dataRsp, &metaRsp, &fetchOffsetNew);
- if(metaRsp.metaRspLen > 0){
+ if (metaRsp.metaRspLen > 0) {
if (tqSendMetaPollRsp(pTq, pMsg, pReq, &metaRsp) < 0) {
code = -1;
}
- tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send meta offset type:%d,uid:%ld,version:%ld", consumerId, pHandle->subKey,
- TD_VID(pTq->pVnode), metaRsp.rspOffset.type, metaRsp.rspOffset.uid, metaRsp.rspOffset.version);
+ tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send meta offset type:%d,uid:%ld,version:%ld", consumerId,
+ pHandle->subKey, TD_VID(pTq->pVnode), metaRsp.rspOffset.type, metaRsp.rspOffset.uid,
+ metaRsp.rspOffset.version);
taosMemoryFree(metaRsp.metaRsp);
goto OVER;
}
- if (dataRsp.blockNum > 0){
+ if (dataRsp.blockNum > 0) {
if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
code = -1;
}
goto OVER;
- }else{
+ } else {
fetchOffsetNew = dataRsp.rspOffset;
}
- tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send data blockNum:%d, offset type:%d,uid:%ld,version:%ld", consumerId, pHandle->subKey,
- TD_VID(pTq->pVnode), dataRsp.blockNum, dataRsp.rspOffset.type, dataRsp.rspOffset.uid, dataRsp.rspOffset.version);
+ tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send data blockNum:%d, offset type:%d,uid:%ld,version:%ld",
+ consumerId, pHandle->subKey, TD_VID(pTq->pVnode), dataRsp.blockNum, dataRsp.rspOffset.type,
+ dataRsp.rspOffset.uid, dataRsp.rspOffset.version);
}
if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN && fetchOffsetNew.type == TMQ_OFFSET__LOG) {
@@ -426,7 +430,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
consumerEpoch = atomic_load_32(&pHandle->epoch);
if (consumerEpoch > reqEpoch) {
tqWarn("tmq poll: consumer %" PRId64 " (epoch %d), subkey %s, vg %d offset %" PRId64
- ", found new consumer epoch %d, discard req epoch %d",
+ ", found new consumer epoch %d, discard req epoch %d",
consumerId, pReq->epoch, pHandle->subKey, TD_VID(pTq->pVnode), fetchVer, consumerEpoch, reqEpoch);
break;
}
@@ -449,7 +453,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
if (pHead->msgType == TDMT_VND_SUBMIT) {
SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
- if (tqLogScanExec(pTq, &pHandle->execHandle, pCont, &dataRsp) < 0) {
+ if (tqLogScanExec(pTq, pHandle, pCont, &dataRsp) < 0) {
/*ASSERT(0);*/
}
// TODO batch optimization:
@@ -490,18 +494,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
OVER:
if (pCkHead) taosMemoryFree(pCkHead);
- // TODO wrap in destroy func
- taosArrayDestroy(dataRsp.blockDataLen);
- taosArrayDestroyP(dataRsp.blockData, (FDelete)taosMemoryFree);
-
- if (dataRsp.withSchema) {
- taosArrayDestroyP(dataRsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
- }
-
- if (dataRsp.withTbName) {
- taosArrayDestroyP(dataRsp.blockTbName, (FDelete)taosMemoryFree);
- }
-
+ tDeleteSMqDataRsp(&dataRsp);
return code;
}
@@ -629,9 +622,9 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
tqReaderSetTbUidList(pHandle->execHandle.pExecReader, tbUidList);
taosArrayDestroy(tbUidList);
- buildSnapContext(handle.meta, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta, (SSnapContext **)(&handle.sContext));
- pHandle->execHandle.task =
- qCreateQueueExecTaskInfo(NULL, &handle, NULL, NULL);
+ buildSnapContext(handle.meta, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta,
+ (SSnapContext**)(&handle.sContext));
+ pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, NULL, NULL);
}
taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
tqDebug("try to persist handle %s consumer %" PRId64, req.subKey, pHandle->consumerId);
diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c
index a0b8141cfb..da596d07f9 100644
--- a/source/dnode/vnode/src/tq/tqExec.c
+++ b/source/dnode/vnode/src/tq/tqExec.c
@@ -60,6 +60,46 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp) {
return 0;
}
+int64_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) {
+ const STqExecHandle* pExec = &pHandle->execHandle;
+ ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN);
+
+ qTaskInfo_t task = pExec->task;
+
+ if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
+ tqDebug("prepare scan failed, return");
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ pRsp->rspOffset = *pOffset;
+ return 0;
+ } else {
+ tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
+ if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
+ tqDebug("prepare scan failed, return");
+ pRsp->rspOffset = *pOffset;
+ return 0;
+ }
+ }
+ }
+
+ int32_t rowCnt = 0;
+ while (1) {
+ SSDataBlock* pDataBlock = NULL;
+ uint64_t ts = 0;
+ tqDebug("tmq task start to execute");
+ if (qExecTask(task, &pDataBlock, &ts) < 0) {
+ ASSERT(0);
+ }
+ tqDebug("tmq task execute end, get %p", pDataBlock);
+
+    if (pDataBlock == NULL) {
+      break;  // task exhausted, no more blocks
+    }
+
+    tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
+    pRsp->blockNum++;
+    rowCnt += pDataBlock->info.rows;
+    if (rowCnt >= 4096) break;  // cap a single poll, as in the retired tqScanSnapshot
+  }
+
+ return 0;
+}
+
int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* pOffset) {
const STqExecHandle* pExec = &pHandle->execHandle;
qTaskInfo_t task = pExec->task;
@@ -102,18 +142,18 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp*
taosArrayPush(pRsp->blockTbName, &tbName);
}
}
- if(pRsp->withSchema){
+ if (pRsp->withSchema) {
if (pOffset->type == TMQ_OFFSET__LOG) {
tqAddBlockSchemaToRsp(pExec, pRsp);
- }else{
+ } else {
SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task));
taosArrayPush(pRsp->blockSchema, &pSW);
}
}
- if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN){
+ if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
- }else{
+ } else {
tqAddBlockDataToRsp(pDataBlock, pRsp, taosArrayGetSize(pDataBlock->pDataBlock));
}
pRsp->blockNum++;
@@ -125,17 +165,9 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp*
}
}
- if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN){
- if (pRsp->blockNum == 0 && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
- tqDebug("vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode),
- pHandle->snapshotVer + 1);
- tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
- qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType);
- continue;
- }
- }else{
- if (pDataBlock == NULL && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA){
- if(qStreamExtractPrepareUid(task) != 0){
+ if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN) {
+ if (pDataBlock == NULL && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ if (qStreamExtractPrepareUid(task) != 0) {
continue;
}
tqDebug("tmqsnap vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode),
@@ -143,13 +175,13 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp*
break;
}
- if (pRsp->blockNum > 0){
+ if (pRsp->blockNum > 0) {
tqDebug("tmqsnap task exec exited, get data");
break;
}
SMqMetaRsp* tmp = qStreamExtractMetaMsg(task);
- if(tmp->rspOffset.type == TMQ_OFFSET__SNAPSHOT_DATA){
+ if (tmp->rspOffset.type == TMQ_OFFSET__SNAPSHOT_DATA) {
tqOffsetResetToData(pOffset, tmp->rspOffset.uid, tmp->rspOffset.ts);
qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType);
tmp->rspOffset.type = TMQ_OFFSET__SNAPSHOT_META;
@@ -173,57 +205,8 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp*
return 0;
}
-#if 0
-int32_t tqScanSnapshot(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffsetVal offset, int32_t workerId) {
- ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN);
- qTaskInfo_t task = pExec->execCol.task[workerId];
-
- if (qStreamPrepareTsdbScan(task, offset.uid, offset.ts) < 0) {
- ASSERT(0);
- }
-
- int32_t rowCnt = 0;
- while (1) {
- SSDataBlock* pDataBlock = NULL;
- uint64_t ts = 0;
- if (qExecTask(task, &pDataBlock, &ts) < 0) {
- ASSERT(0);
- }
- if (pDataBlock == NULL) break;
-
- ASSERT(pDataBlock->info.rows != 0);
- ASSERT(taosArrayGetSize(pDataBlock->pDataBlock) != 0);
-
- tqAddBlockDataToRsp(pDataBlock, pRsp);
-
- if (pRsp->withTbName) {
- pRsp->withTbName = 0;
-#if 0
- int64_t uid;
- int64_t ts;
- if (qGetStreamScanStatus(task, &uid, &ts) < 0) {
- ASSERT(0);
- }
- tqAddTbNameToRsp(pTq, uid, pRsp);
-#endif
- }
- pRsp->blockNum++;
-
- rowCnt += pDataBlock->info.rows;
- if (rowCnt >= 4096) break;
- }
- int64_t uid;
- int64_t ts;
- if (qGetStreamScanStatus(task, &uid, &ts) < 0) {
- ASSERT(0);
- }
- tqOffsetResetToData(&pRsp->rspOffset, uid, ts);
-
- return 0;
-}
-#endif
-
-int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp) {
+int32_t tqLogScanExec(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, SMqDataRsp* pRsp) {
+ STqExecHandle* pExec = &pHandle->execHandle;
ASSERT(pExec->subType != TOPIC_SUB_TYPE__COLUMN);
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
@@ -268,6 +251,28 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR
tqAddBlockSchemaToRsp(pExec, pRsp);
pRsp->blockNum++;
}
+#if 0
+ if (pHandle->fetchMeta && pRsp->blockNum) {
+ SSubmitMsgIter iter = {0};
+ tInitSubmitMsgIter(pReq, &iter);
+ STaosxRsp* pXrsp = (STaosxRsp*)pRsp;
+ while (1) {
+ SSubmitBlk* pBlk = NULL;
+ if (tGetSubmitMsgNext(&iter, &pBlk) < 0) return -1;
+ if (pBlk->schemaLen > 0) {
+ if (pXrsp->createTableNum == 0) {
+ pXrsp->createTableLen = taosArrayInit(0, sizeof(int32_t));
+ pXrsp->createTableReq = taosArrayInit(0, sizeof(void*));
+ }
+ void* createReq = taosMemoryCalloc(1, pBlk->schemaLen);
+ memcpy(createReq, pBlk->data, pBlk->schemaLen);
+ taosArrayPush(pXrsp->createTableLen, &pBlk->schemaLen);
+ taosArrayPush(pXrsp->createTableReq, &createReq);
+ pXrsp->createTableNum++;
+ }
+ }
+ }
+#endif
}
if (pRsp->blockNum == 0) {
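
tqScanData, added above, positions the scan twice before giving up: if the first qStreamPrepareScan fails on a snapshot offset, it resets the offset to the WAL (tqOffsetResetToLog) and retries once. A reduced sketch of that prepare-then-fall-back control flow, with stub names in place of the real calls:

    enum { OFFSET_SNAPSHOT, OFFSET_LOG };

    static int prepare(int offset_type) {  /* stand-in for qStreamPrepareScan */
      (void)offset_type;
      return 0;  /* stub: always succeeds */
    }

    int prepare_with_fallback(int *offset_type) {
      if (prepare(*offset_type) == 0) return 0;   /* primary position worked */
      if (*offset_type == OFFSET_LOG) return -1;  /* already on the WAL: nothing to fall back to */
      *offset_type = OFFSET_LOG;                  /* fall back to the WAL */
      return prepare(*offset_type);
    }
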
diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c
index 7d259fe06c..18d839e109 100644
--- a/source/libs/command/src/command.c
+++ b/source/libs/command/src/command.c
@@ -471,6 +471,7 @@ static int32_t setCreateTBResultIntoDataBlock(SSDataBlock* pBlock, SDbCfgInfo* p
len += sprintf(buf2 + VARSTR_HEADER_SIZE, "CREATE TABLE `%s` (", tbName);
appendColumnFields(buf2, &len, pCfg);
len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, ")");
+ appendTableOptions(buf2, &len, pDbCfg, pCfg);
}
varDataLen(buf2) = len;
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index 7eb02308de..b4e2840330 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -143,6 +143,8 @@ typedef struct {
STqOffsetVal prepareStatus; // for tmq
STqOffsetVal lastStatus; // for tmq
SMqMetaRsp metaRsp; // for tmq fetching meta
+ int64_t snapshotVer;
+
SSchemaWrapper *schema;
char tbName[TSDB_TABLE_NAME_LEN];
SSDataBlock* pullOverBlk; // for streaming
@@ -303,6 +305,7 @@ typedef struct SAggSupporter {
char* keyBuf; // window key buffer
SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ int32_t currentPageId; // current write page id
} SAggSupporter;
typedef struct {
@@ -327,7 +330,6 @@ typedef struct STableScanInfo {
SQueryTableDataCond cond;
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
int32_t dataBlockLoadFlag;
-// SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded.
SSampleExecInfo sample; // sample execution info
int32_t currentGroupId;
int32_t currentTable;
@@ -431,6 +433,7 @@ typedef struct SStreamAggSupporter {
char* pKeyBuf; // window key buffer
SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ int32_t currentPageId; // buffer page that is active
SSDataBlock* pScanBlock;
} SStreamAggSupporter;
@@ -485,24 +488,23 @@ typedef struct SStreamScanInfo {
STimeWindowAggSupp twAggSup;
SSDataBlock* pUpdateDataRes;
// status for tmq
- // SSchemaWrapper schema;
- SNodeList* pGroupTags;
- SNode* pTagCond;
- SNode* pTagIndexCond;
+ SNodeList* pGroupTags;
+ SNode* pTagCond;
+ SNode* pTagIndexCond;
} SStreamScanInfo;
-typedef struct SStreamRawScanInfo{
-// int8_t subType;
-// bool withMeta;
-// int64_t suid;
-// int64_t snapVersion;
-// void *metaInfo;
-// void *dataInfo;
- SVnode* vnode;
- SSDataBlock pRes; // result SSDataBlock
- STsdbReader* dataReader;
- SSnapContext* sContext;
-}SStreamRawScanInfo;
+typedef struct {
+ // int8_t subType;
+ // bool withMeta;
+ // int64_t suid;
+ // int64_t snapVersion;
+ // void *metaInfo;
+ // void *dataInfo;
+ SVnode* vnode;
+ SSDataBlock pRes; // result SSDataBlock
+ STsdbReader* dataReader;
+ SSnapContext* sContext;
+} SStreamRawScanInfo;
typedef struct SSysTableScanInfo {
SRetrieveMetaTableRsp* pRsp;
@@ -527,14 +529,14 @@ typedef struct SBlockDistInfo {
SSDataBlock* pResBlock;
void* pHandle;
SReadHandle readHandle;
- uint64_t uid; // table uid
+ uint64_t uid; // table uid
} SBlockDistInfo;
// todo remove this
typedef struct SOptrBasicInfo {
- SResultRowInfo resultRowInfo;
- SSDataBlock* pRes;
- bool mergeResultBlock;
+ SResultRowInfo resultRowInfo;
+ SSDataBlock* pRes;
+ bool mergeResultBlock;
} SOptrBasicInfo;
typedef struct SIntervalAggOperatorInfo {
@@ -1009,7 +1011,7 @@ int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimary
int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput,
int32_t size);
-SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize);
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize);
SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex);
SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index 3b3ef9e3de..70180d6dc0 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -46,8 +46,8 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
rowSize += pCtx[i].resDataInfo.interBufSize;
}
- rowSize +=
- (numOfOutput * sizeof(bool)); // expand rowSize to mark if col is null for top/bottom result(doSaveTupleData)
+ rowSize += (numOfOutput * sizeof(bool));
+ // expand rowSize to mark if col is null for top/bottom result(saveTupleData)
return rowSize;
}
@@ -1178,7 +1178,6 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
SqlFunctionCtx* pCtx = &pFuncCtx[i];
pCtx->functionId = -1;
- pCtx->curBufPage = -1;
pCtx->pExpr = pExpr;
if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) {
@@ -1191,7 +1190,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet);
} else {
char* udfName = pExpr->pExpr->_function.pFunctNode->functionName;
- strncpy(pCtx->udfName, udfName, strlen(udfName));
+ strncpy(pCtx->udfName, udfName, TSDB_FUNC_NAME_LEN);
fmGetUdafExecFuncs(pCtx->functionId, &pCtx->fpSet);
}
pCtx->fpSet.getEnv(pExpr->pExpr->_function.pFunctNode, &env);
@@ -1222,6 +1221,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
pCtx->isStream = false;
pCtx->param = pFunct->pParam;
+ pCtx->saveHandle.currentPage = -1;
}
for (int32_t i = 1; i < numOfOutput; ++i) {
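
One small but real fix in executil.c above: the udfName copy is now bounded by the destination capacity (TSDB_FUNC_NAME_LEN) rather than strlen(udfName), which bounded nothing. For reference, the fully safe shape also terminates explicitly, since strncpy leaves the buffer unterminated when the source fills it (NAME_LEN is an illustrative capacity):

    #include <string.h>

    #define NAME_LEN 64

    void copy_name(char dst[NAME_LEN], const char *src) {
      strncpy(dst, src, NAME_LEN - 1);  /* bound by the destination... */
      dst[NAME_LEN - 1] = '\0';         /* ...and terminate unconditionally */
    }
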
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index 124f4b44b0..f1ac9ef8b1 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -139,7 +139,7 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols, SSchemaWrapper** pSchema) {
if (msg == NULL) {
- // TODO create raw scan
+ // create raw scan
SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo));
if (NULL == pTaskInfo) {
@@ -151,7 +151,7 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* n
pTaskInfo->cost.created = taosGetTimestampMs();
pTaskInfo->execModel = OPTR_EXEC_MODEL_QUEUE;
pTaskInfo->pRoot = createRawScanOperatorInfo(readers, pTaskInfo);
- if(NULL == pTaskInfo->pRoot){
+ if (NULL == pTaskInfo->pRoot) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFree(pTaskInfo);
return NULL;
@@ -834,11 +834,11 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
} else {
ASSERT(0);
}
- }else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA){
+ } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
SStreamRawScanInfo* pInfo = pOperator->info;
- SSnapContext* sContext = pInfo->sContext;
- if(setForSnapShot(sContext, pOffset->uid) != 0) {
- qError("setDataForSnapShot error. uid:%"PRIi64, pOffset->uid);
+ SSnapContext* sContext = pInfo->sContext;
+ if (setForSnapShot(sContext, pOffset->uid) != 0) {
+ qError("setDataForSnapShot error. uid:%" PRIi64, pOffset->uid);
return -1;
}
@@ -847,27 +847,29 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
pInfo->dataReader = NULL;
cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond);
taosArrayDestroy(pTaskInfo->tableqinfoList.pTableList);
- if(mtInfo.uid == 0) return 0; // no data
+ if (mtInfo.uid == 0) return 0; // no data
initQueryTableDataCondForTmq(&pTaskInfo->streamInfo.tableCond, sContext, mtInfo);
pTaskInfo->streamInfo.tableCond.twindows.skey = pOffset->ts;
pTaskInfo->tableqinfoList.pTableList = taosArrayInit(1, sizeof(STableKeyInfo));
taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &(STableKeyInfo){.uid = mtInfo.uid, .groupId = 0});
- tsdbReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pTaskInfo->tableqinfoList.pTableList, &pInfo->dataReader, NULL);
+ tsdbReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pTaskInfo->tableqinfoList.pTableList,
+ &pInfo->dataReader, NULL);
strcpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName);
tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema);
pTaskInfo->streamInfo.schema = mtInfo.schema;
+
qDebug("tmqsnap qStreamPrepareScan snapshot data uid %ld ts %ld", mtInfo.uid, pOffset->ts);
- }else if(pOffset->type == TMQ_OFFSET__SNAPSHOT_META){
+ } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_META) {
SStreamRawScanInfo* pInfo = pOperator->info;
- SSnapContext* sContext = pInfo->sContext;
- if(setForSnapShot(sContext, pOffset->uid) != 0) {
- qError("setForSnapShot error. uid:%"PRIi64" ,version:%"PRIi64, pOffset->uid);
+ SSnapContext* sContext = pInfo->sContext;
+ if (setForSnapShot(sContext, pOffset->uid) != 0) {
+ qError("setForSnapShot error. uid:%" PRIi64 " ,version:%" PRIi64, pOffset->uid);
return -1;
}
qDebug("tmqsnap qStreamPrepareScan snapshot meta uid %ld ts %ld", pOffset->uid);
- }else if (pOffset->type == TMQ_OFFSET__LOG) {
+ } else if (pOffset->type == TMQ_OFFSET__LOG) {
SStreamRawScanInfo* pInfo = pOperator->info;
tsdbReaderClose(pInfo->dataReader);
pInfo->dataReader = NULL;
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index e79a9fa16e..b53d35a1a1 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -179,26 +179,23 @@ static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pR
}
#endif
-SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize) {
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize) {
SFilePage* pData = NULL;
// in the first scan, new space needed for results
int32_t pageId = -1;
- SIDList list = getDataBufPagesIdList(pResultBuf);
-
- if (taosArrayGetSize(list) == 0) {
- pData = getNewBufPage(pResultBuf, tableGroupId, &pageId);
+ if (*currentPageId == -1) {
+ pData = getNewBufPage(pResultBuf, &pageId);
pData->num = sizeof(SFilePage);
} else {
- SPageInfo* pi = getLastPageInfo(list);
- pData = getBufPage(pResultBuf, getPageId(pi));
- pageId = getPageId(pi);
+ pData = getBufPage(pResultBuf, *currentPageId);
+ pageId = *currentPageId;
if (pData->num + interBufSize > getBufPageSize(pResultBuf)) {
// release current page first, and prepare the next one
- releaseBufPageInfo(pResultBuf, pi);
+ releaseBufPage(pResultBuf, pData);
- pData = getNewBufPage(pResultBuf, tableGroupId, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
if (pData != NULL) {
pData->num = sizeof(SFilePage);
}
@@ -215,9 +212,9 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int
SResultRow* pResultRow = (SResultRow*)((char*)pData + pData->num);
pResultRow->pageId = pageId;
pResultRow->offset = (int32_t)pData->num;
+ *currentPageId = pageId;
pData->num += interBufSize;
-
return pResultRow;
}
@@ -263,18 +260,15 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// allocate a new buffer page
if (pResult == NULL) {
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_2");
-#endif
ASSERT(pSup->resultRowSize > 0);
- pResult = getNewResultRow(pResultBuf, groupId, pSup->resultRowSize);
+ pResult = getNewResultRow(pResultBuf, &pSup->currentPageId, pSup->resultRowSize);
initResultRow(pResult);
// add a new result set for a new group
SResultRowPosition pos = {.pageId = pResult->pageId, .offset = pResult->offset};
tSimpleHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos,
- sizeof(SResultRowPosition));
+ sizeof(SResultRowPosition));
}
// 2. set the new time window to be the new active time window
@@ -302,7 +296,7 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes
SIDList list = getDataBufPagesIdList(pResultBuf);
if (taosArrayGetSize(list) == 0) {
- pData = getNewBufPage(pResultBuf, tid, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
pData->num = sizeof(SFilePage);
} else {
SPageInfo* pi = getLastPageInfo(list);
@@ -313,7 +307,7 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes
// release current page first, and prepare the next one
releaseBufPageInfo(pResultBuf, pi);
- pData = getNewBufPage(pResultBuf, tid, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
if (pData != NULL) {
pData->num = sizeof(SFilePage);
}
@@ -2821,92 +2815,6 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan
}
}
}
-#if 0
-int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) {
- uint8_t type = pOperator->operatorType;
-
- pOperator->status = OP_OPENED;
-
- if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- SStreamScanInfo* pScanInfo = pOperator->info;
- pScanInfo->blockType = STREAM_INPUT__TABLE_SCAN;
-
- pScanInfo->pTableScanOp->status = OP_OPENED;
-
- STableScanInfo* pInfo = pScanInfo->pTableScanOp->info;
- ASSERT(pInfo->scanMode == TABLE_SCAN__TABLE_ORDER);
-
- if (uid == 0) {
- pInfo->noTable = 1;
- return TSDB_CODE_SUCCESS;
- }
-
- /*if (pSnapShotScanInfo->dataReader == NULL) {*/
- /*pSnapShotScanInfo->dataReader = tsdbReaderOpen(pHandle->vnode, &pSTInfo->cond, tableList, 0, 0);*/
- /*pSnapShotScanInfo->scanMode = TABLE_SCAN__TABLE_ORDER;*/
- /*}*/
-
- pInfo->noTable = 0;
-
- if (pInfo->lastStatus.uid != uid || pInfo->lastStatus.ts != ts) {
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
-
- int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
- bool found = false;
- for (int32_t i = 0; i < tableSz; i++) {
- STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
- if (pTableInfo->uid == uid) {
- found = true;
- pInfo->currentTable = i;
- }
- }
- // TODO after processing drop, found can be false
- ASSERT(found);
-
- tsdbSetTableId(pInfo->dataReader, uid);
- int64_t oldSkey = pInfo->cond.twindows.skey;
- pInfo->cond.twindows.skey = ts + 1;
- tsdbReaderReset(pInfo->dataReader, &pInfo->cond);
- pInfo->cond.twindows.skey = oldSkey;
- pInfo->scanTimes = 0;
-
- qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid, ts,
- pInfo->currentTable, tableSz);
- }
-
- return TSDB_CODE_SUCCESS;
-
- } else {
- if (pOperator->numOfDownstream == 1) {
- return doPrepareScan(pOperator->pDownstream[0], uid, ts);
- } else if (pOperator->numOfDownstream == 0) {
- qError("failed to find stream scan operator to set the input data block");
- return TSDB_CODE_QRY_APP_ERROR;
- } else {
- qError("join not supported for stream block scan");
- return TSDB_CODE_QRY_APP_ERROR;
- }
- }
-}
-
-int32_t doGetScanStatus(SOperatorInfo* pOperator, uint64_t* uid, int64_t* ts) {
- int32_t type = pOperator->operatorType;
- if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- SStreamScanInfo* pScanInfo = pOperator->info;
- STableScanInfo* pSnapShotScanInfo = pScanInfo->pTableScanOp->info;
- *uid = pSnapShotScanInfo->lastStatus.uid;
- *ts = pSnapShotScanInfo->lastStatus.ts;
- } else {
- if (pOperator->pDownstream[0] == NULL) {
- return TSDB_CODE_INVALID_PARA;
- } else {
- doGetScanStatus(pOperator->pDownstream[0], uid, ts);
- }
- }
-
- return TSDB_CODE_SUCCESS;
-}
-#endif
// this is a blocking operator
static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
@@ -3030,7 +2938,7 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len
SResultRow* pRow = (SResultRow*)((char*)pPage + pos->offset);
setBufPageDirty(pPage, true);
releaseBufPage(pSup->pResultBuf, pPage);
-
+
int32_t iter = 0;
void* pIter = NULL;
while ((pIter = tSimpleHashIterate(pSup->pResultRowHashTable, pIter, &iter))) {
@@ -3092,7 +3000,7 @@ int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) {
offset += sizeof(int32_t);
uint64_t tableGroupId = *(uint64_t*)(result + offset);
- SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, tableGroupId, pSup->resultRowSize);
+ SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, &pSup->currentPageId, pSup->resultRowSize);
if (!resultRow) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
@@ -3440,8 +3348,10 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul
int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize,
const char* pKey) {
+ int32_t code = 0;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ pAggSup->currentPageId = -1;
pAggSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t));
pAggSup->pResultRowHashTable = tSimpleHashInit(10, hashFn);
@@ -3455,18 +3365,18 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);
if (!osTempSpaceAvailable()) {
- terrno = TSDB_CODE_NO_AVAIL_DISK;
- qError("Init stream agg supporter failed since %s", terrstr(terrno));
- return terrno;
- }
-
- int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
- if (code != TSDB_CODE_SUCCESS) {
- qError("Create agg result buf failed since %s", tstrerror(code));
+ code = TSDB_CODE_NO_AVAIL_DISK;
+ qError("Init stream agg supporter failed since %s, %s", terrstr(code), pKey);
return code;
}
- return TSDB_CODE_SUCCESS;
+ code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
+ if (code != TSDB_CODE_SUCCESS) {
+ qError("Create agg result buf failed since %s, %s", tstrerror(code), pKey);
+ return code;
+ }
+
+ return code;
}
void cleanupAggSup(SAggSupporter* pAggSup) {
@@ -3488,7 +3398,7 @@ int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInf
}
for (int32_t i = 0; i < numOfCols; ++i) {
- pSup->pCtx[i].pBuf = pAggSup->pResultBuf;
+ pSup->pCtx[i].saveHandle.pBuf = pAggSup->pResultBuf;
}
return TSDB_CODE_SUCCESS;
@@ -3520,6 +3430,7 @@ void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
}
taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx);
+ taosMemoryFreeClear(pCtx[i].subsidiaries.buf);
taosMemoryFree(pCtx[i].input.pData);
taosMemoryFree(pCtx[i].input.pColumnDataAgg);
}
@@ -4071,8 +3982,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
STableKeyInfo* pKeyInfo = taosArrayGet(pTableListInfo->pTableList, i);
qDebug("creating stream task: add table %" PRId64, pKeyInfo->uid);
}
- }
#endif
+ }
pTaskInfo->schemaInfo.qsw = extractQueriedColumnSchema(&pTableScanNode->scan);
pOperator = createStreamScanOperatorInfo(pHandle, pTableScanNode, pTagCond, pTaskInfo);
@@ -4297,42 +4208,6 @@ SArray* extractColumnInfo(SNodeList* pNodeList) {
return pList;
}
-#if 0
-STsdbReader* doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
- STableListInfo* pTableListInfo, const char* idstr) {
- int32_t code = getTableList(pHandle->meta, pHandle->vnode, &pTableScanNode->scan, pTableListInfo);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- if (taosArrayGetSize(pTableListInfo->pTableList) == 0) {
- code = 0;
- qDebug("no table qualified for query, %s", idstr);
- goto _error;
- }
-
- SQueryTableDataCond cond = {0};
- code = initQueryTableDataCond(&cond, pTableScanNode);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- STsdbReader* pReader;
- code = tsdbReaderOpen(pHandle->vnode, &cond, pTableListInfo->pTableList, &pReader, idstr);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- cleanupQueryTableDataCond(&cond);
-
- return pReader;
-
-_error:
- terrno = code;
- return NULL;
-}
-#endif
-
static int32_t extractTbscanInStreamOpTree(SOperatorInfo* pOperator, STableScanInfo** ppInfo) {
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
if (pOperator->numOfDownstream == 0) {
@@ -4574,7 +4449,7 @@ _complete:
return code;
}
-static void doDestroyTableList(STableListInfo* pTableqinfoList) {
+void doDestroyTableList(STableListInfo* pTableqinfoList) {
taosArrayDestroy(pTableqinfoList->pTableList);
taosHashCleanup(pTableqinfoList->map);
if (pTableqinfoList->needSortTableByGroupId) {
@@ -4678,6 +4553,7 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInf
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput,
int32_t size) {
+ pSup->currentPageId = -1;
pSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pSup->keySize = sizeof(int64_t) + sizeof(TSKEY);
pSup->pKeyBuf = taosMemoryCalloc(1, pSup->keySize);
@@ -4705,7 +4581,8 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlF
}
int32_t code = createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, tsTempDir);
for (int32_t i = 0; i < numOfOutput; ++i) {
- pCtx[i].pBuf = pSup->pResultBuf;
+ pCtx[i].saveHandle.pBuf = pSup->pResultBuf;
}
+
return code;
}
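
The getNewResultRow rewrite in executorimpl.c drops the per-call page-list walk (getDataBufPagesIdList + getLastPageInfo) and instead caches the active write page in the supporter's currentPageId, reallocating only when the cached page cannot fit another row. A self-contained sketch of that allocator shape, assuming fixed-size pages (the names and the malloc-backed paging are illustrative, not the SDiskbasedBuf API):

    #include <stdlib.h>

    enum { PAGE_CAP = 4096, MAX_PAGES = 64 };
    typedef struct { int used; char data[PAGE_CAP]; } Page;

    static Page *pages[MAX_PAGES];
    static int   npages;

    static Page *new_page(int *id) {  /* allocate and register a fresh page */
      if (npages == MAX_PAGES) return NULL;
      Page *p = calloc(1, sizeof(Page));
      if (p) { *id = npages; pages[npages++] = p; }
      return p;
    }

    char *alloc_row(int *current_page_id, int row_size) {
      Page *p;
      int   id;
      if (*current_page_id == -1) {  /* first allocation: no cached page yet */
        p = new_page(&id);
      } else {
        id = *current_page_id;
        p = pages[id];               /* reuse the cached write page */
        if (p->used + row_size > PAGE_CAP) p = new_page(&id);  /* page full */
      }
      if (p == NULL) return NULL;
      *current_page_id = id;         /* remember the page for the next row */
      char *row = p->data + p->used;
      p->used += row_size;
      return row;
    }
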
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index 9d7e833b19..5d123f723e 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -547,7 +547,7 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
p = taosHashGet(pInfo->pGroupSet, pInfo->keyBuf, len);
int32_t pageId = 0;
- pPage = getNewBufPage(pInfo->pBuf, 0, &pageId);
+ pPage = getNewBufPage(pInfo->pBuf, &pageId);
taosArrayPush(p->pPageList, &pageId);
*(int32_t *) pPage = 0;
@@ -562,7 +562,7 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
// add a new page for current group
int32_t pageId = 0;
- pPage = getNewBufPage(pInfo->pBuf, 0, &pageId);
+ pPage = getNewBufPage(pInfo->pBuf, &pageId);
taosArrayPush(p->pPageList, &pageId);
memset(pPage, 0, getBufPageSize(pInfo->pBuf));
}
diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c
index 0661ccd390..2f12a0d19b 100644
--- a/source/libs/executor/src/projectoperator.c
+++ b/source/libs/executor/src/projectoperator.c
@@ -195,16 +195,6 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS
return PROJECT_RETRIEVE_DONE;
}
-void printDataBlock1(SSDataBlock* pBlock, const char* flag) {
- if (!pBlock || pBlock->info.rows == 0) {
- qDebug("===stream===printDataBlock: Block is Null or Empty");
- return;
- }
- char* pBuf = NULL;
- qDebug("%s", dumpBlockData(pBlock, flag, &pBuf));
- taosMemoryFreeClear(pBuf);
-}
-
SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
SProjectOperatorInfo* pProjectInfo = pOperator->info;
SOptrBasicInfo* pInfo = &pProjectInfo->binfo;
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index d4c98adb7c..de6768b83a 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -1219,7 +1219,7 @@ static void setBlockGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32
static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock) {
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
SOperatorInfo* pOperator = pInfo->pStreamScanOp;
- SExecTaskInfo* pTaskInfo = pInfo->pStreamScanOp->pTaskInfo;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows);
@@ -1228,7 +1228,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
pInfo->pRes->info.type = STREAM_NORMAL;
pInfo->pRes->info.version = pBlock->info.version;
- uint64_t* groupIdPre = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
+ uint64_t* groupIdPre = taosHashGet(pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
if (groupIdPre) {
pInfo->pRes->info.groupId = *groupIdPre;
} else {
@@ -1276,11 +1276,80 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
return 0;
}
+static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStreamScanInfo* pInfo = pOperator->info;
+
+ qDebug("queue scan called");
+ if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
+ if (pResult && pResult->info.rows > 0) {
+ qDebug("queue scan tsdb return %d rows", pResult->info.rows);
+ return pResult;
+ } else {
+ STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
+ tsdbReaderClose(pTSInfo->dataReader);
+ pTSInfo->dataReader = NULL;
+ tqOffsetResetToLog(&pTaskInfo->streamInfo.prepareStatus, pTaskInfo->streamInfo.snapshotVer);
+ qDebug("queue scan tsdb over, switch to wal ver %d", pTaskInfo->streamInfo.snapshotVer + 1);
+ if (tqSeekVer(pInfo->tqReader, pTaskInfo->streamInfo.snapshotVer + 1) < 0) {
+ return NULL;
+ }
+ ASSERT(pInfo->tqReader->pWalReader->curVersion == pTaskInfo->streamInfo.snapshotVer + 1);
+ }
+ }
+
+ if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
+ while (1) {
+ SFetchRet ret = {0};
+ tqNextBlock(pInfo->tqReader, &ret);
+ if (ret.fetchType == FETCH_TYPE__DATA) {
+ blockDataCleanup(pInfo->pRes);
+ if (setBlockIntoRes(pInfo, &ret.data) < 0) {
+ ASSERT(0);
+ }
+ // TODO clean data block
+ if (pInfo->pRes->info.rows > 0) {
+ qDebug("queue scan log return %d rows", pInfo->pRes->info.rows);
+ return pInfo->pRes;
+ }
+ } else if (ret.fetchType == FETCH_TYPE__META) {
+ ASSERT(0);
+ // pTaskInfo->streamInfo.lastStatus = ret.offset;
+ // pTaskInfo->streamInfo.metaBlk = ret.meta;
+ // return NULL;
+ } else if (ret.fetchType == FETCH_TYPE__NONE) {
+ pTaskInfo->streamInfo.lastStatus = ret.offset;
+ ASSERT(pTaskInfo->streamInfo.lastStatus.version >= pTaskInfo->streamInfo.prepareStatus.version);
+ ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 == pInfo->tqReader->pWalReader->curVersion);
+ char formatBuf[80];
+ tFormatOffset(formatBuf, 80, &ret.offset);
+ qDebug("queue scan log return null, offset %s", formatBuf);
+ return NULL;
+ } else {
+ ASSERT(0);
+ }
+ }
+ } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
+ if (pResult && pResult->info.rows > 0) {
+ qDebug("stream scan tsdb return %d rows", pResult->info.rows);
+ return pResult;
+ }
+ qDebug("stream scan tsdb return null");
+ return NULL;
+ } else {
+ ASSERT(0);
+ return NULL;
+ }
+}
+
static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
// NOTE: this operator does never check if current status is done or not
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStreamScanInfo* pInfo = pOperator->info;
+ qDebug("stream scan called");
#if 0
SStreamState* pState = pTaskInfo->streamInfo.pState;
if (pState) {
@@ -1317,48 +1386,6 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
}
#endif
- qDebug("stream scan called");
- if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
- while (1) {
- SFetchRet ret = {0};
- tqNextBlock(pInfo->tqReader, &ret);
- if (ret.fetchType == FETCH_TYPE__DATA) {
- blockDataCleanup(pInfo->pRes);
- if (setBlockIntoRes(pInfo, &ret.data) < 0) {
- ASSERT(0);
- }
- // TODO clean data block
- if (pInfo->pRes->info.rows > 0) {
- qDebug("stream scan log return %d rows", pInfo->pRes->info.rows);
- return pInfo->pRes;
- }
- } else if (ret.fetchType == FETCH_TYPE__META) {
- ASSERT(0);
-// pTaskInfo->streamInfo.lastStatus = ret.offset;
-// pTaskInfo->streamInfo.metaBlk = ret.meta;
-// return NULL;
- } else if (ret.fetchType == FETCH_TYPE__NONE) {
- pTaskInfo->streamInfo.lastStatus = ret.offset;
- ASSERT(pTaskInfo->streamInfo.lastStatus.version >= pTaskInfo->streamInfo.prepareStatus.version);
- ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 == pInfo->tqReader->pWalReader->curVersion);
- char formatBuf[80];
- tFormatOffset(formatBuf, 80, &ret.offset);
- qDebug("stream scan log return null, offset %s", formatBuf);
- return NULL;
- } else {
- ASSERT(0);
- }
- }
- } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
- SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
- if (pResult && pResult->info.rows > 0) {
- qDebug("stream scan tsdb return %d rows", pResult->info.rows);
- return pResult;
- }
- qDebug("stream scan tsdb return null");
- return NULL;
- }
-
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
memcpy(&pTSInfo->cond, &pTaskInfo->streamInfo.tableCond, sizeof(SQueryTableDataCond));
@@ -1554,14 +1581,14 @@ static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
}
static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
-// NOTE: this operator does never check if current status is done or not
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ // NOTE: this operator does never check if current status is done or not
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStreamRawScanInfo* pInfo = pOperator->info;
- pTaskInfo->streamInfo.metaRsp.metaRspLen = 0; // use metaRspLen !=0 to judge if data is meta
+ pTaskInfo->streamInfo.metaRsp.metaRspLen = 0; // use metaRspLen !=0 to judge if data is meta
pTaskInfo->streamInfo.metaRsp.metaRsp = NULL;
qDebug("tmqsnap doRawScan called");
- if(pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA){
+ if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
SSDataBlock* pBlock = &pInfo->pRes;
if (pInfo->dataReader && tsdbNextDataBlock(pInfo->dataReader)) {
@@ -1585,42 +1612,38 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
}
SMetaTableInfo mtInfo = getUidfromSnapShot(pInfo->sContext);
- if (mtInfo.uid == 0){ //read snapshot done, change to get data from wal
+ if (mtInfo.uid == 0) { // read snapshot done, change to get data from wal
qDebug("tmqsnap read snapshot done, change to get data from wal");
pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid;
pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
pTaskInfo->streamInfo.lastStatus.version = pInfo->sContext->snapVersion;
- tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema);
- }else{
+ } else {
pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid;
pTaskInfo->streamInfo.prepareStatus.ts = INT64_MIN;
qDebug("tmqsnap change get data uid:%ld", mtInfo.uid);
qStreamPrepareScan(pTaskInfo, &pTaskInfo->streamInfo.prepareStatus, pInfo->sContext->subType);
- strcpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName);
- tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema);
- pTaskInfo->streamInfo.schema = mtInfo.schema;
}
qDebug("tmqsnap stream scan tsdb return null");
return NULL;
- }else if(pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META){
- SSnapContext *sContext = pInfo->sContext;
- void* data = NULL;
- int32_t dataLen = 0;
- int16_t type = 0;
- int64_t uid = 0;
- if(getMetafromSnapShot(sContext, &data, &dataLen, &type, &uid) < 0){
+ } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) {
+ SSnapContext* sContext = pInfo->sContext;
+ void* data = NULL;
+ int32_t dataLen = 0;
+ int16_t type = 0;
+ int64_t uid = 0;
+ if (getMetafromSnapShot(sContext, &data, &dataLen, &type, &uid) < 0) {
qError("tmqsnap getMetafromSnapShot error");
taosMemoryFreeClear(data);
return NULL;
}
- if(!sContext->queryMetaOrData){ // change to get data next poll request
+ if (!sContext->queryMetaOrData) { // change to get data next poll request
pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META;
pTaskInfo->streamInfo.lastStatus.uid = uid;
pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__SNAPSHOT_DATA;
pTaskInfo->streamInfo.metaRsp.rspOffset.uid = 0;
pTaskInfo->streamInfo.metaRsp.rspOffset.ts = INT64_MIN;
- }else{
+ } else {
pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META;
pTaskInfo->streamInfo.lastStatus.uid = uid;
pTaskInfo->streamInfo.metaRsp.rspOffset = pTaskInfo->streamInfo.lastStatus;
@@ -1631,44 +1654,44 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
return NULL;
}
-// else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
-// int64_t fetchVer = pTaskInfo->streamInfo.prepareStatus.version + 1;
-//
-// while(1){
-// if (tqFetchLog(pInfo->tqReader->pWalReader, pInfo->sContext->withMeta, &fetchVer, &pInfo->pCkHead) < 0) {
-// qDebug("tmqsnap tmq poll: consumer log end. offset %" PRId64, fetchVer);
-// pTaskInfo->streamInfo.lastStatus.version = fetchVer;
-// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
-// return NULL;
-// }
-// SWalCont* pHead = &pInfo->pCkHead->head;
-// qDebug("tmqsnap tmq poll: consumer log offset %" PRId64 " msgType %d", fetchVer, pHead->msgType);
-//
-// if (pHead->msgType == TDMT_VND_SUBMIT) {
-// SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
-// tqReaderSetDataMsg(pInfo->tqReader, pCont, 0);
-// SSDataBlock* block = tqLogScanExec(pInfo->sContext->subType, pInfo->tqReader, pInfo->pFilterOutTbUid, &pInfo->pRes);
-// if(block){
-// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
-// pTaskInfo->streamInfo.lastStatus.version = fetchVer;
-// qDebug("tmqsnap fetch data msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
-// return block;
-// }else{
-// fetchVer++;
-// }
-// } else{
-// ASSERT(pInfo->sContext->withMeta);
-// ASSERT(IS_META_MSG(pHead->msgType));
-// qDebug("tmqsnap fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
-// pTaskInfo->streamInfo.metaRsp.rspOffset.version = fetchVer;
-// pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__LOG;
-// pTaskInfo->streamInfo.metaRsp.resMsgType = pHead->msgType;
-// pTaskInfo->streamInfo.metaRsp.metaRspLen = pHead->bodyLen;
-// pTaskInfo->streamInfo.metaRsp.metaRsp = taosMemoryMalloc(pHead->bodyLen);
-// memcpy(pTaskInfo->streamInfo.metaRsp.metaRsp, pHead->body, pHead->bodyLen);
-// return NULL;
-// }
-// }
+ // else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
+ // int64_t fetchVer = pTaskInfo->streamInfo.prepareStatus.version + 1;
+ //
+ // while(1){
+ // if (tqFetchLog(pInfo->tqReader->pWalReader, pInfo->sContext->withMeta, &fetchVer, &pInfo->pCkHead) < 0) {
+ // qDebug("tmqsnap tmq poll: consumer log end. offset %" PRId64, fetchVer);
+ // pTaskInfo->streamInfo.lastStatus.version = fetchVer;
+ // pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
+ // return NULL;
+ // }
+ // SWalCont* pHead = &pInfo->pCkHead->head;
+ // qDebug("tmqsnap tmq poll: consumer log offset %" PRId64 " msgType %d", fetchVer, pHead->msgType);
+ //
+ // if (pHead->msgType == TDMT_VND_SUBMIT) {
+ // SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
+ // tqReaderSetDataMsg(pInfo->tqReader, pCont, 0);
+ // SSDataBlock* block = tqLogScanExec(pInfo->sContext->subType, pInfo->tqReader, pInfo->pFilterOutTbUid,
+ // &pInfo->pRes); if(block){
+ // pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
+ // pTaskInfo->streamInfo.lastStatus.version = fetchVer;
+ // qDebug("tmqsnap fetch data msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
+ // return block;
+ // }else{
+ // fetchVer++;
+ // }
+ // } else{
+ // ASSERT(pInfo->sContext->withMeta);
+ // ASSERT(IS_META_MSG(pHead->msgType));
+ // qDebug("tmqsnap fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
+ // pTaskInfo->streamInfo.metaRsp.rspOffset.version = fetchVer;
+ // pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__LOG;
+ // pTaskInfo->streamInfo.metaRsp.resMsgType = pHead->msgType;
+ // pTaskInfo->streamInfo.metaRsp.metaRspLen = pHead->bodyLen;
+ // pTaskInfo->streamInfo.metaRsp.metaRsp = taosMemoryMalloc(pHead->bodyLen);
+ // memcpy(pTaskInfo->streamInfo.metaRsp.metaRsp, pHead->body, pHead->bodyLen);
+ // return NULL;
+ // }
+ // }
return NULL;
}
@@ -1689,7 +1712,7 @@ SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pT
// create tq reader
SStreamRawScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamRawScanInfo));
- SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
return NULL;
@@ -1699,13 +1722,12 @@ SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pT
pInfo->sContext = pHandle->sContext;
pOperator->name = "RawStreamScanOperator";
-// pOperator->blocking = false;
-// pOperator->status = OP_NOT_OPENED;
+ // pOperator->blocking = false;
+ // pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;
- pOperator->fpSet = createOperatorFpSet(NULL, doRawScan, NULL, NULL, destroyRawScanOperatorInfo,
- NULL, NULL, NULL);
+ pOperator->fpSet = createOperatorFpSet(NULL, doRawScan, NULL, NULL, destroyRawScanOperatorInfo, NULL, NULL, NULL);
return pOperator;
}
@@ -1724,7 +1746,7 @@ static void destroyStreamScanOperatorInfo(void* param) {
}
if (pStreamScan->pPseudoExpr) {
destroyExprInfo(pStreamScan->pPseudoExpr, pStreamScan->numOfPseudoExpr);
- taosMemoryFreeClear(pStreamScan->pPseudoExpr);
+ taosMemoryFree(pStreamScan->pPseudoExpr);
}
updateInfoDestroy(pStreamScan->pUpdateInfo);
@@ -1815,6 +1837,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->readHandle = *pHandle;
pInfo->tableUid = pScanPhyNode->uid;
+ pTaskInfo->streamInfo.snapshotVer = pHandle->version;
// set the extract column id to streamHandle
tqReaderSetColIdList(pInfo->tqReader, pColIds);
@@ -1858,8 +1881,9 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);
pOperator->pTaskInfo = pTaskInfo;
- pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamScan, NULL, NULL, destroyStreamScanOperatorInfo,
- NULL, NULL, NULL);
+ __optr_fn_t nextFn = pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM ? doStreamScan : doQueueScan;
+ pOperator->fpSet =
+ createOperatorFpSet(operatorDummyOpenFn, nextFn, NULL, NULL, destroyStreamScanOperatorInfo, NULL, NULL, NULL);
return pOperator;
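
createStreamScanOperatorInfo now selects the scan routine once, at build time, wiring doQueueScan into the operator for the queue execution model and doStreamScan otherwise, instead of branching on the model inside one combined function. The dispatch idiom reduced to its core (names are illustrative):

    typedef struct Op Op;
    typedef int (*next_fn_t)(Op *op);

    struct Op { next_fn_t next; };

    enum { MODEL_STREAM, MODEL_QUEUE };

    static int do_stream_scan(Op *op) { (void)op; return 1; }  /* stub */
    static int do_queue_scan(Op *op)  { (void)op; return 2; }  /* stub */

    void init_op(Op *op, int model) {
      /* Pick the next-function once; callers just invoke op->next(op). */
      op->next = (model == MODEL_STREAM) ? do_stream_scan : do_queue_scan;
    }
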
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index b97970aeef..152bd5939d 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -1828,12 +1828,6 @@ static bool timeWindowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols, SInt
return needed;
}
-void increaseTs(SqlFunctionCtx* pCtx) {
- if (pCtx[0].pExpr->pExpr->_function.pFunctNode->funcType == FUNCTION_TYPE_WSTART) {
-// pCtx[0].increase = true;
- }
-}
-
void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SAggSupporter* pSup) {
if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
// Todo(liuyao) support partition by column
@@ -1895,7 +1889,6 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
if (isStream) {
ASSERT(numOfCols > 0);
- increaseTs(pSup->pCtx);
initStreamFunciton(pSup->pCtx, pSup->numOfExprs);
}
@@ -3050,6 +3043,7 @@ static void clearStreamIntervalOperator(SStreamFinalIntervalOperatorInfo* pInfo)
tSimpleHashClear(pInfo->aggSup.pResultRowHashTable);
clearDiskbasedBuf(pInfo->aggSup.pResultBuf);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
+ pInfo->aggSup.currentPageId = -1;
}
static void clearSpecialDataBlock(SSDataBlock* pBlock) {
@@ -3420,7 +3414,6 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
initBasicInfo(&pInfo->binfo, pResBlock);
ASSERT(numOfCols > 0);
- increaseTs(pOperator->exprSupp.pCtx);
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
@@ -3451,6 +3444,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
// semi interval operator does not catch result
pInfo->isFinal = false;
pOperator->name = "StreamSemiIntervalOperator";
+ ASSERT(pInfo->aggSup.currentPageId == -1);
}
if (!IS_FINAL_OP(pInfo) || numOfChild == 0) {
@@ -3559,11 +3553,10 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo*
initBasicInfo(pBasicInfo, pResultBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
- pSup->pCtx[i].pBuf = NULL;
+ pSup->pCtx[i].saveHandle.pBuf = NULL;
}
ASSERT(numOfCols > 0);
- increaseTs(pSup->pCtx);
return TSDB_CODE_SUCCESS;
}
@@ -3820,7 +3813,7 @@ static int32_t setWindowOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pRes
}
if (pWinInfo->pos.pageId == -1) {
- *pResult = getNewResultRow(pAggSup->pResultBuf, groupId, pAggSup->resultRowSize);
+ *pResult = getNewResultRow(pAggSup->pResultBuf, &pAggSup->currentPageId, pAggSup->resultRowSize);
if (*pResult == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -4337,6 +4330,7 @@ static void clearStreamSessionOperator(SStreamSessionAggOperatorInfo* pInfo) {
}
}
clearDiskbasedBuf(pInfo->streamAggSup.pResultBuf);
+ pInfo->streamAggSup.currentPageId = -1;
}
static void removeSessionResults(SHashObj* pHashMap, SArray* pWins) {
diff --git a/source/libs/executor/src/tlinearhash.c b/source/libs/executor/src/tlinearhash.c
index e0752840db..cffabcb6ac 100644
--- a/source/libs/executor/src/tlinearhash.c
+++ b/source/libs/executor/src/tlinearhash.c
@@ -97,7 +97,7 @@ static int32_t doAddToBucket(SLHashObj* pHashObj, SLHashBucket* pBucket, int32_t
// allocate the overflow buffer page to hold this k/v.
int32_t newPageId = -1;
- SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, 0, &newPageId);
+ SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, &newPageId);
if (pNewPage == NULL) {
return terrno;
}
@@ -227,7 +227,7 @@ static int32_t doAddNewBucket(SLHashObj* pHashObj) {
}
int32_t pageId = -1;
- SFilePage* p = getNewBufPage(pHashObj->pBuf, 0, &pageId);
+ SFilePage* p = getNewBufPage(pHashObj->pBuf, &pageId);
if (p == NULL) {
return terrno;
}
diff --git a/source/libs/executor/src/tsimplehash.c b/source/libs/executor/src/tsimplehash.c
index 8cd376e092..84b615af7a 100644
--- a/source/libs/executor/src/tsimplehash.c
+++ b/source/libs/executor/src/tsimplehash.c
@@ -295,11 +295,7 @@ int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t ke
}
if (*pIter == (void *)GET_SHASH_NODE_DATA(pNode)) {
- if (!pPrev) {
- *pIter = NULL;
- } else {
- *pIter = GET_SHASH_NODE_DATA(pPrev);
- }
+ *pIter = pPrev ? GET_SHASH_NODE_DATA(pPrev) : NULL;
}
FREE_HASH_NODE(pNode);
diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c
index fc411e850a..168cd21c44 100644
--- a/source/libs/executor/src/tsort.c
+++ b/source/libs/executor/src/tsort.c
@@ -180,7 +180,7 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
}
int32_t pageId = -1;
- void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId);
+ void* pPage = getNewBufPage(pHandle->pBuf, &pageId);
if (pPage == NULL) {
blockDataDestroy(p);
return terrno;
@@ -512,7 +512,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
}
int32_t pageId = -1;
- void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId);
+ void* pPage = getNewBufPage(pHandle->pBuf, &pageId);
if (pPage == NULL) {
return terrno;
}
diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c
index b7cd02befd..648ae5a538 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -311,6 +311,22 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le
return TSDB_CODE_SUCCESS;
}
+static int32_t translateMinMax(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
+ if (1 != LIST_LENGTH(pFunc->pParameterList)) {
+ return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
+ if (!IS_TIMESTAMP_TYPE(paraType) && !IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ } else if (IS_NULL_TYPE(paraType)) {
+ paraType = TSDB_DATA_TYPE_BIGINT;
+ }
+
+ pFunc->node.resType = (SDataType){.bytes = tDataTypes[paraType].bytes, .type = paraType};
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t translateTrimStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isLtrim) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
@@ -698,7 +714,7 @@ static int32_t translateSpread(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
- if (!IS_NUMERIC_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_NUMERIC_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -713,7 +729,7 @@ static int32_t translateSpreadImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (isPartial) {
- if (!IS_NUMERIC_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_NUMERIC_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
pFunc->node.resType = (SDataType){.bytes = getSpreadInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY};
@@ -788,7 +804,7 @@ static int32_t translateElapsedImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
}
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
- if (TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1634,7 +1650,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType &&
- TSDB_DATA_TYPE_TIMESTAMP != colType) {
+ !IS_TIMESTAMP_TYPE(colType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1660,7 +1676,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
}
uint8_t resType;
- if (IS_SIGNED_NUMERIC_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType || TSDB_DATA_TYPE_TIMESTAMP == colType) {
+ if (IS_SIGNED_NUMERIC_TYPE(colType) || IS_TIMESTAMP_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) {
resType = TSDB_DATA_TYPE_BIGINT;
} else {
resType = TSDB_DATA_TYPE_DOUBLE;
@@ -1825,7 +1841,7 @@ static int32_t translateToIso8601(SFunctionNode* pFunc, char* pErrBuf, int32_t l
// param0
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
- if (!IS_INTEGER_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_INTEGER_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1878,7 +1894,7 @@ static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_
uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
- if ((!IS_STR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && TSDB_DATA_TYPE_TIMESTAMP != para1Type) ||
+ if ((!IS_STR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && !IS_TIMESTAMP_TYPE(para1Type)) ||
!IS_INTEGER_TYPE(para2Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1911,7 +1927,7 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le
for (int32_t i = 0; i < 2; ++i) {
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
- if (!IS_STR_DATA_TYPE(paraType) && !IS_INTEGER_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_STR_DATA_TYPE(paraType) && !IS_INTEGER_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
}
@@ -2060,7 +2076,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.name = "min",
.type = FUNCTION_TYPE_MIN,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_SELECT_FUNC,
- .translateFunc = translateInOutNum,
+ .translateFunc = translateMinMax,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = minmaxFunctionSetup,
@@ -2075,7 +2091,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.name = "max",
.type = FUNCTION_TYPE_MAX,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_SELECT_FUNC,
- .translateFunc = translateInOutNum,
+ .translateFunc = translateMinMax,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = minmaxFunctionSetup,
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index 32d0472a50..7160541c13 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -1146,8 +1146,9 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
return true;
}
-static void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
-static void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
+static STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock);
+static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
+static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos);
static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, const char* tval) {
// the data is loaded, not only the block SMA value
@@ -1159,6 +1160,7 @@ static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, c
}
ASSERT(0);
+ return 0;
}
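
The added `return 0;` after `ASSERT(0)` matters in release builds: with NDEBUG the assert compiles away, and a non-void function that falls off the end is undefined behavior. A standalone illustration of the pattern:

```c
#include <assert.h>
#include <stdio.h>

/* Callers guarantee the key exists, so reaching the end is a logic
 * error -- but under -DNDEBUG the assert vanishes, and the trailing
 * return keeps the function well-defined anyway. */
static int demoFindRowIndex(const int *data, int num, int key) {
  for (int i = 0; i < num; ++i) {
    if (data[i] == key) return i;
  }
  assert(0);
  return 0;
}

int main(void) {
  int rows[] = {7, 3, 9};
  printf("index of 9: %d\n", demoFindRowIndex(rows, 3, 9));
  return 0;
}
```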
int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
@@ -1199,10 +1201,10 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
pBuf->v = *(int64_t*)tval;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
} else {
- if (IS_SIGNED_NUMERIC_TYPE(type)) {
+ if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type)) {
int64_t prev = 0;
GET_TYPED_DATA(prev, int64_t, type, &pBuf->v);
@@ -1211,10 +1213,9 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(int64_t*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
-
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
uint64_t prev = 0;
GET_TYPED_DATA(prev, uint64_t, type, &pBuf->v);
@@ -1224,7 +1225,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(uint64_t*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
} else if (type == TSDB_DATA_TYPE_DOUBLE) {
@@ -1236,7 +1237,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(double*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
} else if (type == TSDB_DATA_TYPE_FLOAT) {
@@ -1250,7 +1251,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
}
@@ -1262,7 +1263,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
int32_t start = pInput->startRowIndex;
int32_t numOfRows = pInput->numOfRows;
- if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
+ if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
if (type == TSDB_DATA_TYPE_TINYINT || type == TSDB_DATA_TYPE_BOOL) {
int8_t* pData = (int8_t*)pCol->pData;
int8_t* val = (int8_t*)&pBuf->v;
@@ -1275,7 +1276,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1287,7 +1288,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1306,7 +1307,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1318,7 +1319,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1337,7 +1338,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1349,14 +1350,15 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
numOfElems += 1;
}
- } else if (type == TSDB_DATA_TYPE_BIGINT) {
+ } else if (type == TSDB_DATA_TYPE_BIGINT ||
+ type == TSDB_DATA_TYPE_TIMESTAMP) {
int64_t* pData = (int64_t*)pCol->pData;
int64_t* val = (int64_t*)&pBuf->v;
@@ -1368,7 +1370,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1380,7 +1382,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1401,7 +1403,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1413,7 +1415,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1432,7 +1434,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1444,7 +1446,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1463,7 +1465,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1475,7 +1477,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1494,7 +1496,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1506,7 +1508,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1526,7 +1528,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1538,7 +1540,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1557,7 +1559,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1569,7 +1571,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1580,7 +1582,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
_min_max_over:
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved ) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos);
+ pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pBuf->nullTupleSaved = true;
}
return numOfElems;
@@ -1599,8 +1601,7 @@ int32_t maxFunction(SqlFunctionCtx* pCtx) {
}
static void setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t rowIndex);
-
-static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rIndex);
+static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rowIndex);
int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
@@ -1648,34 +1649,29 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple
return;
}
- int32_t pageId = pTuplePos->pageId;
- int32_t offset = pTuplePos->offset;
+ if (pCtx->saveHandle.pBuf != NULL) {
+ if (pTuplePos->pageId != -1) {
+ int32_t numOfCols = pCtx->subsidiaries.num;
+ const char* p = loadTupleData(pCtx, pTuplePos);
- if (pTuplePos->pageId != -1) {
- int32_t numOfCols = pCtx->subsidiaries.num;
- SFilePage* pPage = getBufPage(pCtx->pBuf, pageId);
+ bool* nullList = (bool*)p;
+ char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
- bool* nullList = (bool*)((char*)pPage + offset);
- char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
+      // todo: set the offset value to optimize performance
+ for (int32_t j = 0; j < numOfCols; ++j) {
+ SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
+ int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
- // todo set the offset value to optimize the performance.
- for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
-
- SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
- int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
-
- SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId);
- ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
- if (nullList[j]) {
- colDataAppendNULL(pDstCol, rowIndex);
- } else {
- colDataAppend(pDstCol, rowIndex, pStart, false);
+ SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId);
+ ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
+ if (nullList[j]) {
+ colDataAppendNULL(pDstCol, rowIndex);
+ } else {
+ colDataAppend(pDstCol, rowIndex, pStart, false);
+ }
+ pStart += pDstCol->info.bytes;
}
- pStart += pDstCol->info.bytes;
}
-
- releaseBufPage(pCtx->pBuf, pPage);
}
}
@@ -2756,15 +2752,15 @@ static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowInde
return *(TSKEY*)colDataGetData(pTsColInfo, rowIndex);
}
-static void saveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) {
+static void firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) {
if (pCtx->subsidiaries.num <= 0) {
return;
}
if (!pInfo->hasResult) {
- doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
+ pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock);
} else {
- doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
+ updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
}
}
@@ -2778,7 +2774,7 @@ static void doSaveCurrentVal(SqlFunctionCtx* pCtx, int32_t rowIndex, int64_t cur
memcpy(pInfo->buf, pData, pInfo->bytes);
pInfo->ts = currentTs;
- saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
+ firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
pInfo->hasResult = true;
}
@@ -2982,7 +2978,7 @@ static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, S
pOutput->bytes = pInput->bytes;
memcpy(pOutput->buf, pInput->buf, pOutput->bytes);
- saveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput);
+ firstlastSaveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput);
pOutput->hasResult = true;
}
@@ -3087,7 +3083,7 @@ static void doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex, i
}
pInfo->ts = cts;
- saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
+ firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
pInfo->hasResult = true;
}
@@ -3420,7 +3416,7 @@ int32_t topFunction(SqlFunctionCtx* pCtx) {
}
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos);
+ pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pRes->nullTupleSaved = true;
}
return TSDB_CODE_SUCCESS;
@@ -3448,7 +3444,7 @@ int32_t bottomFunction(SqlFunctionCtx* pCtx) {
}
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos);
+ pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pRes->nullTupleSaved = true;
}
@@ -3500,7 +3496,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
// save the data of this tuple
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
+ pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId,
@@ -3524,7 +3520,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
// save the data of this tuple by over writing the old data
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
+ updateTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_copyTuple pageId:%d, offset:%d", pItem->tuplePos.pageId, pItem->tuplePos.offset);
@@ -3541,38 +3537,13 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
* |(n columns, one bit for each column)| src column #1| src column #2|
* +------------------------------------+--------------+--------------+
*/
-void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
- SFilePage* pPage = NULL;
+void* serializeTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SSubsidiaryResInfo* pSubsidiaryies, char* buf) {
+ char* nullList = buf;
+ char* pStart = (char*)(nullList + sizeof(bool) * pSubsidiaryies->num);
- // todo refactor: move away
- int32_t completeRowSize = pCtx->subsidiaries.num * sizeof(bool);
- for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
- completeRowSize += pc->pExpr->base.resSchema.bytes;
- }
-
- if (pCtx->curBufPage == -1) {
- pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage);
- pPage->num = sizeof(SFilePage);
- } else {
- pPage = getBufPage(pCtx->pBuf, pCtx->curBufPage);
- if (pPage->num + completeRowSize > getBufPageSize(pCtx->pBuf)) {
- // current page is all used, let's prepare a new buffer page
- releaseBufPage(pCtx->pBuf, pPage);
- pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage);
- pPage->num = sizeof(SFilePage);
- }
- }
-
- pPos->pageId = pCtx->curBufPage;
- pPos->offset = pPage->num;
-
- // keep the current row data, extract method
int32_t offset = 0;
- bool* nullList = (bool*)((char*)pPage + pPage->num);
- char* pStart = (char*)(nullList + sizeof(bool) * pCtx->subsidiaries.num);
- for (int32_t i = 0; i < pCtx->subsidiaries.num; ++i) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
+ for (int32_t i = 0; i < pSubsidiaryies->num; ++i) {
+ SqlFunctionCtx* pc = pSubsidiaryies->pCtx[i];
SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
int32_t srcSlotId = pFuncParam->pCol->slotId;
@@ -3593,50 +3564,88 @@ void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock*
offset += pCol->info.bytes;
}
- pPage->num += completeRowSize;
-
- setBufPageDirty(pPage, true);
- releaseBufPage(pCtx->pBuf, pPage);
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_saveTuple pos:%p,pageId:%d, offset:%d\n", pPos, pPos->pageId, pPos->offset);
-#endif
+ return buf;
}
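
serializeTupleData flattens one source row into the layout described in the tuple-format comment above: a leading array of per-column null flags, then each subsidiary column's payload at a fixed offset. A self-contained sketch of that layout, with illustrative column descriptors rather than the real SSubsidiaryResInfo:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { int32_t bytes; const void *data; bool isNull; } DemoCol;

/* layout: [n bool null flags][col #1 payload][col #2 payload]... ;
 * a NULL column still reserves its slot so offsets stay fixed */
static size_t demoSerializeRow(const DemoCol *cols, int n, char *buf) {
  bool  *nullList = (bool *)buf;
  char  *pStart   = (char *)(nullList + n);
  size_t offset   = 0;
  for (int i = 0; i < n; ++i) {
    nullList[i] = cols[i].isNull;
    if (!cols[i].isNull) memcpy(pStart + offset, cols[i].data, cols[i].bytes);
    offset += cols[i].bytes;
  }
  return n * sizeof(bool) + offset;
}

int main(void) {
  int64_t ts = 1690000000000LL;
  double  v  = 3.5;
  DemoCol cols[] = {{sizeof ts, &ts, false}, {sizeof v, &v, false}};
  char    buf[64];
  printf("row length: %zu bytes\n", demoSerializeRow(cols, 2, buf)); /* 18 */
  return 0;
}
```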
-void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
- SFilePage* pPage = getBufPage(pCtx->pBuf, pPos->pageId);
+static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length) {
+ STuplePos p = {0};
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = NULL;
- int32_t numOfCols = pCtx->subsidiaries.num;
-
- bool* nullList = (bool*)((char*)pPage + pPos->offset);
- char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
-
- int32_t offset = 0;
- for (int32_t i = 0; i < numOfCols; ++i) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
- SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
- int32_t srcSlotId = pFuncParam->pCol->slotId;
-
- SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId);
- if ((nullList[i] = colDataIsNull_s(pCol, rowIndex)) == true) {
- offset += pCol->info.bytes;
- continue;
- }
-
- char* p = colDataGetData(pCol, rowIndex);
- if (IS_VAR_DATA_TYPE(pCol->info.type)) {
- memcpy(pStart + offset, p, (pCol->info.type == TSDB_DATA_TYPE_JSON) ? getJsonValueLen(p) : varDataTLen(p));
+ if (pHandle->currentPage == -1) {
+ pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage);
+ pPage->num = sizeof(SFilePage);
} else {
- memcpy(pStart + offset, p, pCol->info.bytes);
+ pPage = getBufPage(pHandle->pBuf, pHandle->currentPage);
+ if (pPage->num + length > getBufPageSize(pHandle->pBuf)) {
+ // current page is all used, let's prepare a new buffer page
+ releaseBufPage(pHandle->pBuf, pPage);
+ pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage);
+ pPage->num = sizeof(SFilePage);
+ }
}
- offset += pCol->info.bytes;
+ p = (STuplePos) {.pageId = pHandle->currentPage, .offset = pPage->num};
+ memcpy(pPage->data + pPage->num, pBuf, length);
+
+ pPage->num += length;
+ setBufPageDirty(pPage, true);
+ releaseBufPage(pHandle->pBuf, pPage);
+ } else {
+ // other tuple save policy
}
- setBufPageDirty(pPage, true);
- releaseBufPage(pCtx->pBuf, pPage);
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_copyTuple pos:%p, pageId:%d, offset:%d", pPos, pPos->pageId, pPos->offset);
-#endif
+ return p;
+}
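
doSaveTupleData then appends that flat buffer to the current SFilePage, rolling over to a fresh page when the row would overflow it, and returns the resulting (pageId, offset) pair as the tuple position. The cursor logic in isolation, under simplified assumptions (no page header, hypothetical names):

```c
#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t pageId; int32_t offset; } DemoTuplePos;

static DemoTuplePos demoAppendRow(int32_t *currentPage, int32_t *pageNum,
                                  int32_t pageSize, int32_t rowLen) {
  if (*currentPage == -1 || *pageNum + rowLen > pageSize) {
    ++*currentPage;            /* acquire a fresh buffer page */
    *pageNum = 0;
  }
  DemoTuplePos pos = {.pageId = *currentPage, .offset = *pageNum};
  *pageNum += rowLen;          /* row bytes are memcpy'd at pos.offset */
  return pos;
}

int main(void) {
  int32_t page = -1, used = 0;
  for (int i = 0; i < 3; ++i) {
    DemoTuplePos p = demoAppendRow(&page, &used, 40, 18);
    printf("tuple %d -> page %d offset %d\n", i, p.pageId, p.offset);
  }
  return 0;  /* third row rolls over: page 1, offset 0 */
}
```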
+
+STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock) {
+ if (pCtx->subsidiaries.rowLen == 0) {
+ int32_t rowLen = 0;
+ for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
+ SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
+ rowLen += pc->pExpr->base.resSchema.bytes;
+ }
+
+ pCtx->subsidiaries.rowLen = rowLen + pCtx->subsidiaries.num * sizeof(bool);
+ pCtx->subsidiaries.buf = taosMemoryMalloc(pCtx->subsidiaries.rowLen);
+ }
+
+ char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
+ return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen);
+}
+
+static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STuplePos* pPos) {
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
+ memcpy(pPage->data + pPos->offset, pBuf, length);
+ setBufPageDirty(pPage, true);
+ releaseBufPage(pHandle->pBuf, pPage);
+ } else {
+    // other tuple update policy, mirroring the save path above
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
+ char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
+ doUpdateTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pPos);
+ return TSDB_CODE_SUCCESS;
+}
+
+static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPos) {
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
+ char* p = pPage->data + pPos->offset;
+ releaseBufPage(pHandle->pBuf, pPage);
+ return p;
+ } else {
+ return NULL;
+ }
+}
+
+static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos) {
+ return doLoadTupleData(&pCtx->saveHandle, pPos);
}
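
Tying the pieces together: saveTupleData computes the serialized row length once, caches it on the context together with a reusable scratch buffer, and hands the serialized bytes to doSaveTupleData; loadTupleData simply returns a pointer into the buffer page. A hedged sketch of the lazy initialization, with a hypothetical struct in place of the real SSubsidiaryResInfo:

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int   num;      /* subsidiary output columns */
  int   rowLen;   /* 0 until the first row is saved */
  char *buf;      /* scratch space reused for every row */
} DemoSubsidiaries;

static char *demoScratch(DemoSubsidiaries *s, const int *colBytes) {
  if (s->rowLen == 0) {                       /* first call only */
    int rowLen = 0;
    for (int j = 0; j < s->num; ++j) rowLen += colBytes[j];
    s->rowLen = rowLen + s->num * (int)sizeof(bool);
    s->buf    = malloc(s->rowLen);
  }
  return s->buf;                              /* reused thereafter */
}

int main(void) {
  DemoSubsidiaries s = {.num = 2};
  int bytes[] = {8, 8};
  demoScratch(&s, bytes);
  printf("cached rowLen = %d\n", s.rowLen);   /* 2 flags + 16 = 18 */
  free(s.buf);
  return 0;
}
```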
int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
@@ -3788,8 +3797,6 @@ int32_t spreadFunction(SqlFunctionCtx* pCtx) {
SColumnInfoData* pCol = pInput->pData[0];
int32_t start = pInput->startRowIndex;
- int32_t numOfRows = pInput->numOfRows;
-
// check the valid data one by one
for (int32_t i = start; i < pInput->numOfRows + start; ++i) {
if (colDataIsNull_f(pCol->nullbitmap, i)) {
@@ -4964,7 +4971,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
if (pInfo->numSampled < pInfo->samples) {
sampleAssignResult(pInfo, data, pInfo->numSampled);
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[pInfo->numSampled]);
+ pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
pInfo->numSampled++;
} else {
@@ -4972,7 +4979,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
if (j < pInfo->samples) {
sampleAssignResult(pInfo, data, j);
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]);
+ updateTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]);
}
}
}
@@ -4995,7 +5002,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {
}
if (pInfo->numSampled == 0 && pCtx->subsidiaries.num > 0 && !pInfo->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pInfo->nullTuplePos);
+ pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pInfo->nullTupleSaved = true;
}
diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c
index 152a970c48..26735fa263 100644
--- a/source/libs/function/src/functionMgt.c
+++ b/source/libs/function/src/functionMgt.c
@@ -101,6 +101,14 @@ bool fmIsBuiltinFunc(const char* pFunc) {
return NULL != taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc));
}
+EFunctionType fmGetFuncType(const char* pFunc) {
+ void* pVal = taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc));
+ if (NULL != pVal) {
+ return funcMgtBuiltins[*(int32_t*)pVal].type;
+ }
+ return FUNCTION_TYPE_UDF;
+}
+
EFuncDataRequired fmFuncDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow) {
if (fmIsUserDefinedFunc(pFunc->funcId) || pFunc->funcId < 0 || pFunc->funcId >= funcMgtBuiltinsNum) {
return FUNC_DATA_REQUIRED_DATA_LOAD;
diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c
index dbe0b6bb3a..4c58c0abe5 100644
--- a/source/libs/function/src/tpercentile.c
+++ b/source/libs/function/src/tpercentile.c
@@ -372,7 +372,7 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
pPageIdList = pList;
}
- pSlot->info.data = getNewBufPage(pBucket->pBuffer, groupId, &pageId);
+ pSlot->info.data = getNewBufPage(pBucket->pBuffer, &pageId);
pSlot->info.pageId = pageId;
taosArrayPush(pPageIdList, &pageId);
}
diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c
index d5a3e91eea..5de9c52cc1 100644
--- a/source/libs/function/src/tudf.c
+++ b/source/libs/function/src/tudf.c
@@ -81,7 +81,7 @@ static int32_t udfSpawnUdfd(SUdfdData* pData) {
taosDirName(path);
#endif
} else {
- strncpy(path, tsProcPath, strlen(tsProcPath));
+ strncpy(path, tsProcPath, PATH_MAX);
taosDirName(path);
}
#ifdef WINDOWS
diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c
index 5b27e030b9..a45e4585e8 100644
--- a/source/libs/function/src/udfd.c
+++ b/source/libs/function/src/udfd.c
@@ -453,7 +453,7 @@ void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
goto _return;
}
taosCloseFile(&file);
- strncpy(udf->path, path, strlen(path));
+ strncpy(udf->path, path, PATH_MAX);
tFreeSFuncInfo(pFuncInfo);
taosArrayDestroy(retrieveRsp.pFuncInfos);
msgInfo->code = 0;
@@ -566,17 +566,17 @@ int32_t udfdLoadUdf(char *udfName, SUdf *udf) {
uv_dlsym(&udf->lib, processFuncName, (void **)(&udf->aggProcFunc));
char startFuncName[TSDB_FUNC_NAME_LEN + 6] = {0};
char *startSuffix = "_start";
- strncpy(startFuncName, processFuncName, strlen(processFuncName));
+ strncpy(startFuncName, processFuncName, sizeof(startFuncName));
strncat(startFuncName, startSuffix, strlen(startSuffix));
uv_dlsym(&udf->lib, startFuncName, (void **)(&udf->aggStartFunc));
char finishFuncName[TSDB_FUNC_NAME_LEN + 7] = {0};
char *finishSuffix = "_finish";
- strncpy(finishFuncName, processFuncName, strlen(processFuncName));
+ strncpy(finishFuncName, processFuncName, sizeof(finishFuncName));
strncat(finishFuncName, finishSuffix, strlen(finishSuffix));
uv_dlsym(&udf->lib, finishFuncName, (void **)(&udf->aggFinishFunc));
char mergeFuncName[TSDB_FUNC_NAME_LEN + 6] = {0};
char *mergeSuffix = "_merge";
- strncpy(finishFuncName, processFuncName, strlen(processFuncName));
+ strncpy(finishFuncName, processFuncName, sizeof(finishFuncName));
strncat(finishFuncName, mergeSuffix, strlen(mergeSuffix));
uv_dlsym(&udf->lib, finishFuncName, (void **)(&udf->aggMergeFunc));
}
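
All of these strncpy fixes share one failure mode: bounding the copy by strlen(src) writes no terminating NUL, so any longer string already in the destination keeps its tail. A standalone demonstration (note strncpy still leaves the buffer unterminated if the source fills it completely, so the destination here is sized with headroom):

```c
#include <stdio.h>
#include <string.h>

int main(void) {
  char buf[32];
  strcpy(buf, "previous_longer_content");

  strncpy(buf, "new", strlen("new"));   /* old bug: 3 bytes, no NUL */
  printf("%s\n", buf);                  /* "newvious_longer_content" */

  strncpy(buf, "new", sizeof(buf));     /* fix: bound by destination size */
  printf("%s\n", buf);                  /* "new" (rest NUL-padded) */
  return 0;
}
```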
diff --git a/source/libs/index/src/indexComm.c b/source/libs/index/src/indexComm.c
index 4f33d98f9e..cd52d122f7 100644
--- a/source/libs/index/src/indexComm.c
+++ b/source/libs/index/src/indexComm.c
@@ -171,6 +171,7 @@ TExeCond tCompare(__compar_fn_t func, int8_t cmptype, void* a, void* b, int8_t d
return tDoCompare(func, cmptype, &va, &vb);
}
assert(0);
+ return BREAK;
#endif
}
TExeCond tDoCompare(__compar_fn_t func, int8_t comparType, void* a, void* b) {
diff --git a/source/libs/index/src/indexFstFile.c b/source/libs/index/src/indexFstFile.c
index 4f278c7af6..1900e50973 100644
--- a/source/libs/index/src/indexFstFile.c
+++ b/source/libs/index/src/indexFstFile.c
@@ -39,7 +39,8 @@ static void idxGenLRUKey(char* buf, const char* path, int32_t blockId) {
}
static int idxFileCtxDoWrite(IFileCtx* ctx, uint8_t* buf, int len) {
if (ctx->type == TFILE) {
- assert(len == taosWriteFile(ctx->file.pFile, buf, len));
+ int nwr = taosWriteFile(ctx->file.pFile, buf, len);
+ assert(nwr == len);
} else {
memcpy(ctx->mem.buf + ctx->offset, buf, len);
}
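
Hoisting taosWriteFile out of the assert keeps the write from disappearing in release builds, where NDEBUG turns assert into a no-op and the expression inside it is never evaluated. Compile this sketch with and without -DNDEBUG to see the side effect vanish:

```c
#include <assert.h>
#include <stdio.h>

static int writes = 0;
static int demoWrite(int len) { ++writes; return len; }

int main(void) {
  int len = 16;
  assert(demoWrite(len) == len);             /* gone under -DNDEBUG */
  printf("writes performed: %d\n", writes);  /* 1 normally, 0 with NDEBUG */
  return 0;
}
```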
diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c
index 82b5842663..4dcd2bba5a 100644
--- a/source/libs/parser/src/parAstParser.c
+++ b/source/libs/parser/src/parAstParser.c
@@ -97,16 +97,23 @@ typedef struct SCollectMetaKeyCxt {
typedef struct SCollectMetaKeyFromExprCxt {
SCollectMetaKeyCxt* pComCxt;
+ bool hasLastRow;
int32_t errCode;
} SCollectMetaKeyFromExprCxt;
static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt);
static EDealRes collectMetaKeyFromFunction(SCollectMetaKeyFromExprCxt* pCxt, SFunctionNode* pFunc) {
- if (fmIsBuiltinFunc(pFunc->functionName)) {
- return DEAL_RES_CONTINUE;
+ switch (fmGetFuncType(pFunc->functionName)) {
+ case FUNCTION_TYPE_LAST_ROW:
+ pCxt->hasLastRow = true;
+ break;
+ case FUNCTION_TYPE_UDF:
+ pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache);
+ break;
+ default:
+ break;
}
- pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache);
return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
}
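
The rewrite turns the expression walk into a classifier: UDFs still reserve their metadata inline, but last_row only raises a flag, and the database cacheLast configuration is fetched once after the walk (see reserveDbCfgForLastRow below) instead of for every table. The walk-then-act pattern in miniature, with hypothetical names:

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct { bool hasLastRow; int errCode; } DemoWalkCxt;

/* visitor: record facts cheaply during the walk... */
static void demoVisitFunc(DemoWalkCxt *cxt, const char *name) {
  if (strcmp(name, "last_row") == 0) cxt->hasLastRow = true;
}

int main(void) {
  DemoWalkCxt cxt = {0};
  const char *funcs[] = {"max", "last_row", "avg"};
  for (int i = 0; i < 3; ++i) demoVisitFunc(&cxt, funcs[i]);
  /* ...then act once, after the whole walk is done */
  if (cxt.hasLastRow) printf("reserve db cfg for the FROM table\n");
  return 0;
}
```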
@@ -136,9 +143,6 @@ static int32_t collectMetaKeyFromRealTableImpl(SCollectMetaKeyCxt* pCxt, const c
if (TSDB_CODE_SUCCESS == code && (0 == strcmp(pTable, TSDB_INS_TABLE_DNODE_VARIABLES))) {
code = reserveDnodeRequiredInCache(pCxt->pMetaCache);
}
- if (TSDB_CODE_SUCCESS == code) {
- code = reserveDbCfgInCache(pCxt->pParseCxt->acctId, pDb, pCxt->pMetaCache);
- }
return code;
}
@@ -185,9 +189,19 @@ static int32_t collectMetaKeyFromSetOperator(SCollectMetaKeyCxt* pCxt, SSetOpera
return code;
}
+static int32_t reserveDbCfgForLastRow(SCollectMetaKeyCxt* pCxt, SNode* pTable) {
+ if (NULL == pTable || QUERY_NODE_REAL_TABLE != nodeType(pTable)) {
+ return TSDB_CODE_SUCCESS;
+ }
+ return reserveDbCfgInCache(pCxt->pParseCxt->acctId, ((SRealTableNode*)pTable)->table.dbName, pCxt->pMetaCache);
+}
+
static int32_t collectMetaKeyFromSelect(SCollectMetaKeyCxt* pCxt, SSelectStmt* pStmt) {
- SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS};
+ SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .hasLastRow = false, .errCode = TSDB_CODE_SUCCESS};
nodesWalkSelectStmt(pStmt, SQL_CLAUSE_FROM, collectMetaKeyFromExprImpl, &cxt);
+ if (TSDB_CODE_SUCCESS == cxt.errCode && cxt.hasLastRow) {
+ cxt.errCode = reserveDbCfgForLastRow(pCxt, pStmt->pFromTable);
+ }
return cxt.errCode;
}
@@ -365,7 +379,7 @@ static int32_t collectMetaKeyFromShowStables(SCollectMetaKeyCxt* pCxt, SShowStmt
}
static int32_t collectMetaKeyFromShowStreams(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS,
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STREAMS,
pCxt->pMetaCache);
}
@@ -411,7 +425,7 @@ static int32_t collectMetaKeyFromShowVgroups(SCollectMetaKeyCxt* pCxt, SShowStmt
}
static int32_t collectMetaKeyFromShowTopics(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TOPICS,
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TOPICS,
pCxt->pMetaCache);
}
@@ -506,7 +520,7 @@ static int32_t collectMetaKeyFromShowBlockDist(SCollectMetaKeyCxt* pCxt, SShowTa
}
static int32_t collectMetaKeyFromShowSubscriptions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_SUBSCRIPTIONS,
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SUBSCRIPTIONS,
pCxt->pMetaCache);
}
diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c
index 76a8d42fa0..2ac8f11b78 100644
--- a/source/libs/parser/src/parInsert.c
+++ b/source/libs/parser/src/parInsert.c
@@ -502,6 +502,10 @@ static int32_t parseValueToken(char** end, SToken* pToken, SSchema* pSchema, int
return func(pMsgBuf, NULL, 0, param);
}
+ if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid numeric data", pToken->z);
+ }
+
switch (pSchema->type) {
case TSDB_DATA_TYPE_BOOL: {
if ((pToken->type == TK_NK_BOOL || pToken->type == TK_NK_STRING) && (pToken->n != 0)) {
@@ -1665,6 +1669,10 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
pDb = taosHashIterate(context.pDbFNameHashObj, pDb);
}
}
+ if (pContext->pStmtCb) {
+ context.pVgroupsHashObj = NULL;
+ context.pTableBlockHashObj = NULL;
+ }
destroyInsertParseContext(&context);
return code;
}
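
Nulling the two hash maps before destroyInsertParseContext is an ownership hand-off: for prepared statements (pStmtCb set) the stmt layer keeps the vgroup and table-block maps alive, so the generic teardown must not free them. The pattern in a hedged, standalone form:

```c
#include <stdlib.h>

typedef struct { void *pVgroups; void *pBlocks; } DemoParseCxt;

static void demoDestroy(DemoParseCxt *c) {
  free(c->pVgroups);  /* harmless no-ops once detached (NULL) */
  free(c->pBlocks);
}

static void demoFinish(DemoParseCxt *c, int handedToStmt) {
  if (handedToStmt) {       /* the stmt context now owns the maps */
    c->pVgroups = NULL;
    c->pBlocks  = NULL;
  }
  demoDestroy(c);           /* frees only what we still own */
}

int main(void) {
  DemoParseCxt c = {malloc(8), malloc(8)};
  demoFinish(&c, 1);        /* would double-free without the detach;
                               the demo leaks the two blocks on purpose */
  return 0;
}
```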
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 54c759fa39..a88b7b417f 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -142,8 +142,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_STREAMS_STMT,
- .pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
- .pTableName = TSDB_PERFS_TABLE_STREAMS,
+ .pDbName = TSDB_INFORMATION_SCHEMA_DB,
+ .pTableName = TSDB_INS_TABLE_STREAMS,
.numOfShowCols = 1,
.pShowCols = {"stream_name"}
},
@@ -184,8 +184,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_TOPICS_STMT,
- .pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
- .pTableName = TSDB_PERFS_TABLE_TOPICS,
+ .pDbName = TSDB_INFORMATION_SCHEMA_DB,
+ .pTableName = TSDB_INS_TABLE_TOPICS,
.numOfShowCols = 1,
.pShowCols = {"topic_name"}
},
@@ -240,8 +240,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
- .pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
- .pTableName = TSDB_PERFS_TABLE_SUBSCRIPTIONS,
+ .pDbName = TSDB_INFORMATION_SCHEMA_DB,
+ .pTableName = TSDB_INS_TABLE_SUBSCRIPTIONS,
.numOfShowCols = 1,
.pShowCols = {"*"}
},
@@ -2160,15 +2160,16 @@ static int32_t setTableIndex(STranslateContext* pCxt, SName* pName, SRealTableNo
return TSDB_CODE_SUCCESS;
}
-static int32_t setTableCacheLastMode(STranslateContext* pCxt, SName* pName, SRealTableNode* pRealTable) {
- if (TSDB_SYSTEM_TABLE == pRealTable->pMeta->tableType) {
+static int32_t setTableCacheLastMode(STranslateContext* pCxt, SSelectStmt* pSelect) {
+ if (!pSelect->hasLastRowFunc || QUERY_NODE_REAL_TABLE != nodeType(pSelect->pFromTable)) {
return TSDB_CODE_SUCCESS;
}
- SDbCfgInfo dbCfg = {0};
- int32_t code = getDBCfg(pCxt, pRealTable->table.dbName, &dbCfg);
+ SRealTableNode* pTable = (SRealTableNode*)pSelect->pFromTable;
+ SDbCfgInfo dbCfg = {0};
+ int32_t code = getDBCfg(pCxt, pTable->table.dbName, &dbCfg);
if (TSDB_CODE_SUCCESS == code) {
- pRealTable->cacheLastMode = dbCfg.cacheLast;
+ pTable->cacheLastMode = dbCfg.cacheLast;
}
return code;
}
@@ -2192,9 +2193,6 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
if (TSDB_CODE_SUCCESS == code) {
code = setTableIndex(pCxt, &name, pRealTable);
}
- if (TSDB_CODE_SUCCESS == code) {
- code = setTableCacheLastMode(pCxt, &name, pRealTable);
- }
}
if (TSDB_CODE_SUCCESS == code) {
pRealTable->table.precision = pRealTable->pMeta->tableInfo.precision;
@@ -2273,10 +2271,14 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) {
if (QUERY_NODE_COLUMN == nodeType(pExpr)) {
SColumnNode* pCol = (SColumnNode*)pExpr;
len = snprintf(buf, sizeof(buf), "%s(%s.%s)", pSrcFunc->functionName, pCol->tableAlias, pCol->colName);
+ strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
+ len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pCol->colName);
+ strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1));
} else {
len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pExpr->aliasName);
+ strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
+ strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1));
}
- strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
return (SNode*)pFunc;
}
@@ -2475,13 +2477,65 @@ static int32_t translateOrderBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
return code;
}
+static EDealRes needFillImpl(SNode* pNode, void* pContext) {
+ if (isAggFunc(pNode) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) {
+ *(bool*)pContext = true;
+ return DEAL_RES_END;
+ }
+ return DEAL_RES_CONTINUE;
+}
+
+static bool needFill(SNode* pNode) {
+ bool hasFillFunc = false;
+ nodesWalkExpr(pNode, needFillImpl, &hasFillFunc);
+ return hasFillFunc;
+}
+
+static bool mismatchFillDataType(SDataType origDt, SDataType fillDt) {
+ if (TSDB_DATA_TYPE_NULL == fillDt.type) {
+ return false;
+ }
+ if (IS_NUMERIC_TYPE(origDt.type) && !IS_NUMERIC_TYPE(fillDt.type)) {
+ return true;
+ }
+ if (IS_VAR_DATA_TYPE(origDt.type) && !IS_VAR_DATA_TYPE(fillDt.type)) {
+ return true;
+ }
+ return false;
+}
+
+static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeList* pProjectionList) {
+ if (FILL_MODE_VALUE != pFill->mode) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t fillNo = 0;
+ SNodeListNode* pFillValues = (SNodeListNode*)pFill->pValues;
+ SNode* pProject = NULL;
+ FOREACH(pProject, pProjectionList) {
+ if (needFill(pProject)) {
+ if (fillNo >= LIST_LENGTH(pFillValues->pNodeList)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch");
+ }
+ if (mismatchFillDataType(((SExprNode*)pProject)->resType,
+ ((SExprNode*)nodesListGetNode(pFillValues->pNodeList, fillNo))->resType)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
+ }
+ ++fillNo;
+ }
+ }
+ if (fillNo != LIST_LENGTH(pFillValues->pNodeList)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch");
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t translateFillValues(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow) ||
NULL == ((SIntervalWindowNode*)pSelect->pWindow)->pFill) {
return TSDB_CODE_SUCCESS;
}
- SFillNode* pFill = (SFillNode*)((SIntervalWindowNode*)pSelect->pWindow)->pFill;
- return TSDB_CODE_SUCCESS;
+ return checkFillValues(pCxt, (SFillNode*)((SIntervalWindowNode*)pSelect->pWindow)->pFill, pSelect->pProjectionList);
}
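
checkFillValues pairs each projection that actually needs filling (an aggregate other than GROUP_KEY) with the next FILL(VALUE, ...) literal, rejecting both count and type mismatches; a NULL literal is always compatible. The type rule in compact form, with illustrative kinds rather than the real TSDB type macros:

```c
#include <stdbool.h>
#include <stdio.h>

enum { K_NULL, K_NUMERIC, K_VARCHAR };

static bool demoFillTypeMismatch(int origKind, int fillKind) {
  if (fillKind == K_NULL) return false;  /* NULL can fill any column */
  if (origKind == K_NUMERIC && fillKind != K_NUMERIC) return true;
  if (origKind == K_VARCHAR && fillKind != K_VARCHAR) return true;
  return false;
}

int main(void) {
  /* e.g. FILL(VALUE, 'abc') under a numeric avg() is rejected */
  printf("numeric <- varchar: %s\n",
         demoFillTypeMismatch(K_NUMERIC, K_VARCHAR) ? "mismatch" : "ok");
  printf("numeric <- NULL   : %s\n",
         demoFillTypeMismatch(K_NUMERIC, K_NULL) ? "mismatch" : "ok");
  return 0;
}
```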
static int32_t rewriteProjectAlias(SNodeList* pProjectionList) {
@@ -3088,6 +3142,9 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) {
code = replaceOrderByAliasForSelect(pCxt, pSelect);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = setTableCacheLastMode(pCxt, pSelect);
+ }
return code;
}
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 7e27132f3c..7ee6a5b223 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -136,8 +136,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
}
static EDealRes rewriteQueryExprAliasImpl(SNode* pNode, void* pContext) {
- if (nodesIsExprNode(pNode) && QUERY_NODE_COLUMN != nodeType(pNode) && '\0' == ((SExprNode*)pNode)->userAlias[0]) {
- strcpy(((SExprNode*)pNode)->userAlias, ((SExprNode*)pNode)->aliasName);
+ if (nodesIsExprNode(pNode) && QUERY_NODE_COLUMN != nodeType(pNode)) {
sprintf(((SExprNode*)pNode)->aliasName, "#%d", *(int32_t*)pContext);
++(*(int32_t*)pContext);
}
diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp
index b376c33d1a..cd7a9d549a 100644
--- a/source/libs/parser/test/mockCatalog.cpp
+++ b/source/libs/parser/test/mockCatalog.cpp
@@ -137,7 +137,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) {
}
{
ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS, TSDB_SYSTEM_TABLE, 1)
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STREAMS, TSDB_SYSTEM_TABLE, 1)
.addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
builder.done();
}
@@ -149,7 +149,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) {
}
{
ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_SUBSCRIPTIONS, TSDB_SYSTEM_TABLE, 1)
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SUBSCRIPTIONS, TSDB_SYSTEM_TABLE, 1)
.addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
builder.done();
}
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index 0667c5f5b9..bf72f52105 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -197,28 +197,21 @@ static EScanType getScanType(SLogicPlanContext* pCxt, SNodeList* pScanPseudoCols
return SCAN_TYPE_TABLE;
}
-static SNode* createPrimaryKeyCol(uint64_t tableId) {
+static SNode* createFirstCol(uint64_t tableId, const SSchema* pSchema) {
SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
if (NULL == pCol) {
return NULL;
}
- pCol->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP;
- pCol->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes;
+ pCol->node.resType.type = pSchema->type;
+ pCol->node.resType.bytes = pSchema->bytes;
pCol->tableId = tableId;
- pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+ pCol->colId = pSchema->colId;
pCol->colType = COLUMN_TYPE_COLUMN;
- strcpy(pCol->colName, "#primarykey");
+ strcpy(pCol->colName, pSchema->name);
return (SNode*)pCol;
}
-static int32_t addPrimaryKeyCol(uint64_t tableId, SNodeList** pCols) {
- if (NULL == *pCols) {
- *pCols = nodesMakeList();
- if (NULL == *pCols) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- }
-
+static int32_t addPrimaryKeyCol(uint64_t tableId, const SSchema* pSchema, SNodeList** pCols) {
bool found = false;
SNode* pCol = NULL;
FOREACH(pCol, *pCols) {
@@ -229,13 +222,25 @@ static int32_t addPrimaryKeyCol(uint64_t tableId, SNodeList** pCols) {
}
if (!found) {
- if (TSDB_CODE_SUCCESS != nodesListStrictAppend(*pCols, createPrimaryKeyCol(tableId))) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
+ return nodesListMakeStrictAppend(pCols, createFirstCol(tableId, pSchema));
}
return TSDB_CODE_SUCCESS;
}
+static int32_t addSystableFirstCol(uint64_t tableId, const SSchema* pSchema, SNodeList** pCols) {
+ if (LIST_LENGTH(*pCols) > 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+ return nodesListMakeStrictAppend(pCols, createFirstCol(tableId, pSchema));
+}
+
+static int32_t addDefaultScanCol(const STableMeta* pMeta, SNodeList** pCols) {
+ if (TSDB_SYSTEM_TABLE == pMeta->tableType) {
+ return addSystableFirstCol(pMeta->uid, pMeta->schema, pCols);
+ }
+ return addPrimaryKeyCol(pMeta->uid, pMeta->schema, pCols);
+}
+
static int32_t makeScanLogicNode(SLogicPlanContext* pCxt, SRealTableNode* pRealTable, bool hasRepeatScanFuncs,
SLogicNode** pLogicNode) {
SScanLogicNode* pScan = (SScanLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_SCAN);
@@ -299,8 +304,8 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
pScan->hasNormalCols = true;
}
- if (TSDB_CODE_SUCCESS == code && SCAN_TYPE_SYSTEM_TABLE != pScan->scanType) {
- code = addPrimaryKeyCol(pScan->tableId, &pScan->pScanCols);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = addDefaultScanCol(pRealTable->pMeta, &pScan->pScanCols);
}
// set output
@@ -787,10 +792,8 @@ static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSele
static EDealRes needFillValueImpl(SNode* pNode, void* pContext) {
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
SColumnNode* pCol = (SColumnNode*)pNode;
- if (COLUMN_TYPE_WINDOW_START != pCol->colType &&
- COLUMN_TYPE_WINDOW_END != pCol->colType &&
- COLUMN_TYPE_WINDOW_DURATION != pCol->colType &&
- COLUMN_TYPE_GROUP_KEY != pCol->colType) {
+ if (COLUMN_TYPE_WINDOW_START != pCol->colType && COLUMN_TYPE_WINDOW_END != pCol->colType &&
+ COLUMN_TYPE_WINDOW_DURATION != pCol->colType && COLUMN_TYPE_GROUP_KEY != pCol->colType) {
*(bool*)pContext = true;
return DEAL_RES_END;
}
@@ -1008,7 +1011,8 @@ static int32_t createPartitionLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pS
int32_t code =
nodesCollectColumns(pSelect, SQL_CLAUSE_PARTITION_BY, NULL, COLLECT_COL_TYPE_ALL, &pPartition->node.pTargets);
if (TSDB_CODE_SUCCESS == code && NULL == pPartition->node.pTargets) {
- code = nodesListMakeStrictAppend(&pPartition->node.pTargets, nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0)));
+ code = nodesListMakeStrictAppend(&pPartition->node.pTargets,
+ nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0)));
}
if (TSDB_CODE_SUCCESS == code) {
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index 653291d3a4..c2f1d71b18 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -1665,7 +1665,10 @@ static bool eliminateProjOptMayBeOptimized(SLogicNode* pNode) {
return false;
}
- if (QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(pNode) || 1 != LIST_LENGTH(pNode->pChildren)) {
+  // A super table scan requires the project operator to merge packets to improve performance.
+ if (QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(pNode) || 1 != LIST_LENGTH(pNode->pChildren) ||
+ (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(nodesListGetNode(pNode->pChildren, 0)) &&
+ TSDB_SUPER_TABLE == ((SScanLogicNode*)nodesListGetNode(pNode->pChildren, 0))->tableType)) {
return false;
}
diff --git a/source/libs/planner/test/planSysTbTest.cpp b/source/libs/planner/test/planSysTbTest.cpp
index 921f86f09a..6b40e381cc 100644
--- a/source/libs/planner/test/planSysTbTest.cpp
+++ b/source/libs/planner/test/planSysTbTest.cpp
@@ -32,3 +32,9 @@ TEST_F(PlanSysTableTest, informationSchema) {
run("SELECT * FROM information_schema.ins_databases WHERE name = 'information_schema'");
}
+
+TEST_F(PlanSysTableTest, withAgg) {
+ useDb("root", "information_schema");
+
+ run("SELECT COUNT(1) FROM ins_users");
+}
diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp
index f904643be9..96f7d29230 100644
--- a/source/libs/planner/test/planTestUtil.cpp
+++ b/source/libs/planner/test/planTestUtil.cpp
@@ -278,12 +278,12 @@ class PlannerTestBaseImpl {
}
void dump(DumpModule module) {
+ cout << "========================================== " << sqlNo_ << " sql : [" << stmtEnv_.sql_ << "]" << endl;
+
if (DUMP_MODULE_NOTHING == module) {
return;
}
- cout << "========================================== " << sqlNo_ << " sql : [" << stmtEnv_.sql_ << "]" << endl;
-
if (DUMP_MODULE_ALL == module || DUMP_MODULE_PARSER == module) {
if (res_.prepareAst_.empty()) {
cout << "+++++++++++++++++++++syntax tree : " << endl;
diff --git a/source/libs/scalar/inc/filterInt.h b/source/libs/scalar/inc/filterInt.h
index 87327a3657..e7695b2f04 100644
--- a/source/libs/scalar/inc/filterInt.h
+++ b/source/libs/scalar/inc/filterInt.h
@@ -276,7 +276,7 @@ struct SFilterInfo {
#define FILTER_CLR_FLAG(st, f) st &= (~f)
#define SIMPLE_COPY_VALUES(dst, src) *((int64_t *)dst) = *((int64_t *)src)
-#define FILTER_PACKAGE_UNIT_HASH_KEY(v, optr, idx1, idx2) do { char *_t = (char *)v; _t[0] = optr; *(uint32_t *)(_t + 1) = idx1; *(uint32_t *)(_t + 3) = idx2; } while (0)
+#define FLT_PACKAGE_UNIT_HASH_KEY(v, op1, op2, lidx, ridx, ridx2) do { char *_t = (char *)(v); _t[0] = (op1); _t[1] = (op2); *(uint32_t *)(_t + 2) = (lidx); *(uint32_t *)(_t + 2 + sizeof(uint32_t)) = (ridx); } while (0)
#define FILTER_GREATER(cr,sflag,eflag) ((cr > 0) || ((cr == 0) && (FILTER_GET_FLAG(sflag,RANGE_FLG_EXCLUDE) || FILTER_GET_FLAG(eflag,RANGE_FLG_EXCLUDE))))
#define FILTER_COPY_RA(dst, src) do { (dst)->sflag = (src)->sflag; (dst)->eflag = (src)->eflag; (dst)->s = (src)->s; (dst)->e = (src)->e; } while (0)
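
The widened dedup key replaces the packed int64 with a zero-initialized 14-byte buffer carrying both operators plus the field indexes, so a two-sided range unit (e.g. col > a AND col < b, built via filterAddUnitImpl below) can be deduplicated as a single unit. A sketch of the packing as the macro lays it out, using memcpy instead of the raw pointer casts but the same byte positions:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* key layout: [op1][op2][lidx:4][ridx:4][zeroed tail] in 14 bytes */
static void demoPackUnitKey(char key[14], uint8_t op1, uint8_t op2,
                            uint32_t lidx, uint32_t ridx) {
  memset(key, 0, 14);
  key[0] = (char)op1;
  key[1] = (char)op2;
  memcpy(key + 2, &lidx, sizeof(uint32_t));
  memcpy(key + 2 + sizeof(uint32_t), &ridx, sizeof(uint32_t));
}

int main(void) {
  char k[14];
  demoPackUnitKey(k, 7 /* > */, 9 /* < */, 1, 2);
  printf("op1=%d op2=%d\n", k[0], k[1]);
  return 0;
}
```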
diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c
index 5555a52c8e..9e67635437 100644
--- a/source/libs/scalar/src/filter.c
+++ b/source/libs/scalar/src/filter.c
@@ -1068,14 +1068,14 @@ int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *f
return TSDB_CODE_SUCCESS;
}
-int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint32_t *uidx) {
+int32_t filterAddUnitImpl(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint8_t optr2, SFilterFieldId *right2, uint32_t *uidx) {
if (FILTER_GET_FLAG(info->options, FLT_OPTION_NEED_UNIQE)) {
if (info->pctx.unitHash == NULL) {
info->pctx.unitHash = taosHashInit(FILTER_DEFAULT_GROUP_SIZE * FILTER_DEFAULT_UNIT_SIZE, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, false);
} else {
- int64_t v = 0;
- FILTER_PACKAGE_UNIT_HASH_KEY(&v, optr, left->idx, right ? right->idx : -1);
- void *hu = taosHashGet(info->pctx.unitHash, &v, sizeof(v));
+ char v[14] = {0};
+ FLT_PACKAGE_UNIT_HASH_KEY(&v, optr, optr2, left->idx, (right ? right->idx : -1), (right2 ? right2->idx : -1));
+ void *hu = taosHashGet(info->pctx.unitHash, v, sizeof(v));
if (hu) {
*uidx = *(uint32_t *)hu;
return TSDB_CODE_SUCCESS;
@@ -1097,7 +1097,11 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
if (right) {
u->right = *right;
}
-
+ u->compare.optr2 = optr2;
+ if (right2) {
+ u->right2 = *right2;
+ }
+
if (u->right.type == FLD_TYPE_VALUE) {
SFilterField *val = FILTER_UNIT_RIGHT_FIELD(info, u);
assert(FILTER_GET_FLAG(val->flag, FLD_TYPE_VALUE));
@@ -1118,9 +1122,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
*uidx = info->unitNum;
if (FILTER_GET_FLAG(info->options, FLT_OPTION_NEED_UNIQE)) {
- int64_t v = 0;
- FILTER_PACKAGE_UNIT_HASH_KEY(&v, optr, left->idx, right ? right->idx : -1);
- taosHashPut(info->pctx.unitHash, &v, sizeof(v), uidx, sizeof(*uidx));
+ char v[14] = {0};
+ FLT_PACKAGE_UNIT_HASH_KEY(&v, optr, optr2, left->idx, (right ? right->idx : -1), (right2 ? right2->idx : -1));
+ taosHashPut(info->pctx.unitHash, v, sizeof(v), uidx, sizeof(*uidx));
}
++info->unitNum;
@@ -1129,6 +1133,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
}
+int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint32_t *uidx) {
+ return filterAddUnitImpl(info, optr, left, right, 0, NULL, uidx);
+}
int32_t filterAddUnitToGroup(SFilterGroup *group, uint32_t unitIdx) {
if (group->unitNum >= group->unitSize) {
@@ -1305,8 +1312,8 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan
SIMPLE_COPY_VALUES(data2, &ra->e);
filterAddField(dst, NULL, &data2, FLD_TYPE_VALUE, &right2, tDataTypes[type].bytes, true);
- filterAddUnit(dst, FILTER_GET_FLAG(ra->sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx);
- filterAddUnitRight(dst, FILTER_GET_FLAG(ra->eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, uidx);
+ filterAddUnitImpl(dst, FILTER_GET_FLAG(ra->sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right,
+ FILTER_GET_FLAG(ra->eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, &uidx);
filterAddUnitToGroup(g, uidx);
return TSDB_CODE_SUCCESS;
}
@@ -1380,8 +1387,8 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan
SIMPLE_COPY_VALUES(data2, &r->ra.e);
filterAddField(dst, NULL, &data2, FLD_TYPE_VALUE, &right2, tDataTypes[type].bytes, true);
- filterAddUnit(dst, FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx);
- filterAddUnitRight(dst, FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, uidx);
+ filterAddUnitImpl(dst, FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right,
+ FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, &uidx);
filterAddUnitToGroup(g, uidx);
}
@@ -2231,6 +2238,44 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t
return TSDB_CODE_SUCCESS;
}
+bool filterIsSameUnits(SFilterColInfo* pCol1, SFilterColInfo* pCol2) {
+ if (pCol1->type != pCol2->type) {
+ return false;
+ }
+
+ if (RANGE_TYPE_MR_CTX == pCol1->type) {
+ SFilterRangeCtx* pCtx1 = (SFilterRangeCtx*)pCol1->info;
+ SFilterRangeCtx* pCtx2 = (SFilterRangeCtx*)pCol2->info;
+
+ if ((pCtx1->isnull != pCtx2->isnull) || (pCtx1->notnull != pCtx2->notnull) || (pCtx1->isrange != pCtx2->isrange)) {
+ return false;
+ }
+
+ SFilterRangeNode* pNode1 = pCtx1->rs;
+ SFilterRangeNode* pNode2 = pCtx2->rs;
+
+ while (true) {
+ if (NULL == pNode1 && NULL == pNode2) {
+ break;
+ }
+
+ if (NULL == pNode1 || NULL == pNode2) {
+ return false;
+ }
+
+ if (pNode1->ra.s != pNode2->ra.s || pNode1->ra.e != pNode2->ra.e || pNode1->ra.sflag != pNode2->ra.sflag || pNode1->ra.eflag != pNode2->ra.eflag) {
+ return false;
+ }
+
+ pNode1 = pNode1->next;
+ pNode2 = pNode2->next;
+ }
+ }
+
+ return true;
+}
+
void filterCheckColConflict(SFilterGroupCtx* gRes1, SFilterGroupCtx* gRes2, bool *conflict) {
uint32_t idx1 = 0, idx2 = 0, m = 0, n = 0;
bool equal = false;
@@ -2256,6 +2301,11 @@ void filterCheckColConflict(SFilterGroupCtx* gRes1, SFilterGroupCtx* gRes2, bool
return;
}
+ if (!filterIsSameUnits(&gRes1->colInfo[idx1], &gRes2->colInfo[idx2])) {
+ *conflict = true;
+ return;
+ }
+
// for long in operation
if (gRes1->colInfo[idx1].optr == OP_TYPE_EQUAL && gRes2->colInfo[idx2].optr == OP_TYPE_EQUAL) {
SFilterRangeCtx* ctx = gRes1->colInfo[idx1].info;
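
filterIsSameUnits makes group merging conservative: two groups may merge only if each shared column carries structurally identical range sets, which it verifies by walking both singly linked range lists in lockstep. The comparison shape, reduced to a standalone sketch (endpoint flags elided):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct DemoRange { long s, e; struct DemoRange *next; } DemoRange;

static bool demoSameRangeList(const DemoRange *a, const DemoRange *b) {
  while (a != NULL && b != NULL) {
    if (a->s != b->s || a->e != b->e) return false;
    a = a->next;
    b = b->next;
  }
  return a == NULL && b == NULL;  /* equal only if both end together */
}

int main(void) {
  DemoRange r2 = {10, 20, NULL};
  DemoRange r1 = {1, 5, &r2};
  printf("same: %d\n", demoSameRangeList(&r1, &r1));  /* 1 */
  return 0;
}
```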
@@ -2869,17 +2919,22 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3
for (uint32_t g = 0; g < info->groupNum; ++g) {
SFilterGroup *group = &info->groups[g];
+    // the first slot stores the block unit count for this group, followed by unitNum blkUnitIdx entries
*unitNum = group->unitNum;
all = 0;
empty = 0;
+    // remember where this group's unit index slice starts
+ uint32_t * pGroupIdx = unitIdx;
for (uint32_t u = 0; u < group->unitNum; ++u) {
uint32_t uidx = group->unitIdxs[u];
if (info->blkUnitRes[uidx] == 1) {
+        // blkUnitRes == 1 means the unit is always true, so it need not be re-checked; remove this unit from the group
--(*unitNum);
all = 1;
continue;
} else if (info->blkUnitRes[uidx] == -1) {
+        // blkUnitRes == -1 means the unit is always false, so the whole group is false; remove this group from blkGroupNum
*unitNum = 0;
empty = 1;
break;
@@ -2889,6 +2944,9 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3
}
if (*unitNum == 0) {
+      // if the remaining unit num is zero, rewind unitIdx to the start of this group
+ unitIdx = pGroupIdx;
+
--info->blkGroupNum;
assert(empty || all);
diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c
index 07c4fa8429..3bda9bcd51 100644
--- a/source/libs/sync/src/syncIndexMgr.c
+++ b/source/libs/sync/src/syncIndexMgr.c
@@ -163,6 +163,7 @@ int64_t syncIndexMgrGetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pR
}
}
ASSERT(0);
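+  // unreachable when asserts abort; return -1 so release builds have a defined value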
+ return -1;
}
void syncIndexMgrSetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t recvTime) {
@@ -190,6 +191,7 @@ int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRa
}
}
ASSERT(0);
+ return -1;
}
// for debug -------------------
@@ -245,4 +247,5 @@ SyncTerm syncIndexMgrGetTerm(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftI
}
}
ASSERT(0);
+ return -1;
}
\ No newline at end of file
diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c
index 5489a107e7..0be3392a9a 100644
--- a/source/libs/sync/src/syncSnapshot.c
+++ b/source/libs/sync/src/syncSnapshot.c
@@ -583,7 +583,7 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap
&(pReceiver->snapshot));
if (code != 0) {
syncNodeErrorLog(pReceiver->pSyncNode, "snapshot stop writer true error");
- ASSERT(0);
+ // ASSERT(0);
return -1;
}
pReceiver->pWriter = NULL;
diff --git a/source/libs/sync/src/syncTimeout.c b/source/libs/sync/src/syncTimeout.c
index af15c377fb..c3c8131cbb 100644
--- a/source/libs/sync/src/syncTimeout.c
+++ b/source/libs/sync/src/syncTimeout.c
@@ -91,16 +91,16 @@ int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
} else if (pMsg->timeoutType == SYNC_TIMEOUT_ELECTION) {
if (atomic_load_64(&ths->electTimerLogicClockUser) <= pMsg->logicClock) {
++(ths->electTimerCounter);
- sInfo("vgId:%d, sync timeout, type:election count:%d, electTimerLogicClockUser:%ld", ths->vgId,
- ths->electTimerCounter, ths->electTimerLogicClockUser);
+ sTrace("vgId:%d, sync timer, type:election count:%d, electTimerLogicClockUser:%ld", ths->vgId,
+ ths->electTimerCounter, ths->electTimerLogicClockUser);
syncNodeElect(ths);
}
} else if (pMsg->timeoutType == SYNC_TIMEOUT_HEARTBEAT) {
if (atomic_load_64(&ths->heartbeatTimerLogicClockUser) <= pMsg->logicClock) {
++(ths->heartbeatTimerCounter);
- sInfo("vgId:%d, sync timeout, type:replicate count:%d, heartbeatTimerLogicClockUser:%ld", ths->vgId,
- ths->heartbeatTimerCounter, ths->heartbeatTimerLogicClockUser);
+ sTrace("vgId:%d, sync timer, type:replicate count:%d, heartbeatTimerLogicClockUser:%ld", ths->vgId,
+ ths->heartbeatTimerCounter, ths->heartbeatTimerLogicClockUser);
syncNodeReplicate(ths, true);
}
} else {
diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c
index 386ea95dd7..275e7b42cc 100644
--- a/source/libs/transport/src/thttp.c
+++ b/source/libs/transport/src/thttp.c
@@ -21,6 +21,7 @@
#include "taoserror.h"
#include "tlog.h"
+// clang-format on
#define HTTP_RECV_BUF_SIZE 1024
@@ -29,7 +30,7 @@ typedef struct SHttpClient {
uv_tcp_t tcp;
uv_write_t req;
uv_buf_t* wbuf;
- char *rbuf;
+ char* rbuf;
char* addr;
uint16_t port;
} SHttpClient;
@@ -130,35 +131,36 @@ static void destroyHttpClient(SHttpClient* cli) {
taosMemoryFree(cli->rbuf);
taosMemoryFree(cli->addr);
taosMemoryFree(cli);
-
}
static void clientCloseCb(uv_handle_t* handle) {
SHttpClient* cli = handle->data;
destroyHttpClient(cli);
}
-static void clientAllocBuffCb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
- SHttpClient* cli = handle->data;
- buf->base = cli->rbuf;
- buf->len = HTTP_RECV_BUF_SIZE;
+static void clientAllocBuffCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
+ SHttpClient* cli = handle->data;
+ buf->base = cli->rbuf;
+ buf->len = HTTP_RECV_BUF_SIZE;
}
-static void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t *buf) {
- SHttpClient* cli = handle->data;
+static void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) {
+ SHttpClient* cli = handle->data;
if (nread < 0) {
uError("http-report recv error:%s", uv_err_name(nread));
} else {
uTrace("http-report succ to recv %d bytes, just ignore it", nread);
}
uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
-}
+}
static void clientSentCb(uv_write_t* req, int32_t status) {
SHttpClient* cli = req->data;
if (status != 0) {
terrno = TAOS_SYSTEM_ERROR(status);
uError("http-report failed to send data %s", uv_strerror(status));
+ uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
+ return;
} else {
uTrace("http-report succ to send data");
}
- uv_read_start((uv_stream_t *)&cli->tcp, clientAllocBuffCb, clientRecvCb);
+ uv_read_start((uv_stream_t*)&cli->tcp, clientAllocBuffCb, clientRecvCb);
}
static void clientConnCb(uv_connect_t* req, int32_t status) {
SHttpClient* cli = req->data;
@@ -210,7 +212,7 @@ int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32
cli->tcp.data = cli;
cli->req.data = cli;
cli->wbuf = wb;
- cli->rbuf = taosMemoryCalloc(1, HTTP_RECV_BUF_SIZE);
+ cli->rbuf = taosMemoryCalloc(1, HTTP_RECV_BUF_SIZE);
cli->addr = tstrdup(server);
cli->port = port;
@@ -231,4 +233,3 @@ int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32
uv_loop_close(loop);
return terrno;
}
-// clang-format on
diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c
index 0a0dcef378..9e0a8f2a10 100644
--- a/source/libs/transport/src/trans.c
+++ b/source/libs/transport/src/trans.c
@@ -43,7 +43,7 @@ void* rpcOpen(const SRpcInit* pInit) {
return NULL;
}
if (pInit->label) {
- tstrncpy(pRpc->label, pInit->label, strlen(pInit->label) + 1);
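+    // bound the copy by the destination capacity rather than the source length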
+ tstrncpy(pRpc->label, pInit->label, TSDB_LABEL_LEN);
}
// register callback handle
pRpc->cfp = pInit->cfp;
diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index 7052b0b915..41688c7330 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -16,7 +16,7 @@
#include "transComm.h"
typedef struct SConnList {
- queue conn;
+ queue conns;
int32_t size;
} SConnList;
@@ -107,11 +107,11 @@ static void doCloseIdleConn(void* param);
static void cliReadTimeoutCb(uv_timer_t* handle);
// register timer in each thread to clear expire conn
// static void cliTimeoutCb(uv_timer_t* handle);
-// alloc buf for recv
+// alloc buffer for recv
static void cliAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf);
-// callback after read nbytes from socket
+// callback after recv nbytes from socket
static void cliRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf);
-// callback after write data to socket
+// callback after send data to socket
static void cliSendCb(uv_write_t* req, int status);
// callback after conn to server
static void cliConnCb(uv_connect_t* req, int status);
@@ -129,19 +129,14 @@ static SCliConn* cliCreateConn(SCliThrd* thrd);
static void cliDestroyConn(SCliConn* pConn, bool clear /*clear tcp handle or not*/);
static void cliDestroy(uv_handle_t* handle);
static void cliSend(SCliConn* pConn);
+static void cliDestroyConnMsgs(SCliConn* conn, bool destroy);
-static bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx) {
- if (code != 0) return false;
- if (pCtx->retryCnt == 0) return false;
- if (transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) return false;
- return true;
-}
+// cli util funcs
+static bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx);
+static void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr);
+
+static int32_t cliBuildExceptResp(SCliMsg* pMsg, STransMsg* resp);
-void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr);
-/*
- * set TCP connection timeout per-socket level
- */
-static int cliCreateSocket();
// process data read from server, add decompress etc later
static void cliHandleResp(SCliConn* conn);
// handle except about conn
@@ -169,15 +164,14 @@ static void destroyThrdObj(SCliThrd* pThrd);
static void cliWalkCb(uv_handle_t* handle, void* arg);
static void cliReleaseUnfinishedMsg(SCliConn* conn) {
- SCliMsg* pMsg = NULL;
for (int i = 0; i < transQueueSize(&conn->cliMsgs); i++) {
- pMsg = transQueueGet(&conn->cliMsgs, i);
- if (pMsg != NULL && pMsg->ctx != NULL) {
- if (conn->ctx.freeFunc != NULL) {
- conn->ctx.freeFunc(pMsg->ctx->ahandle);
+ SCliMsg* msg = transQueueGet(&conn->cliMsgs, i);
+ if (msg != NULL && msg->ctx != NULL) {
+ if (conn->ctx.freeFunc != NULL && msg->ctx->ahandle != NULL) {
+ conn->ctx.freeFunc(msg->ctx->ahandle);
}
}
- destroyCmsg(pMsg);
+ destroyCmsg(msg);
}
}
#define CLI_RELEASE_UV(loop) \
@@ -217,8 +211,10 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
} \
if (i == sz) { \
pMsg = NULL; \
+ tDebug("msg not found, %" PRIu64 "", ahandle); \
} else { \
pMsg = transQueueRm(&conn->cliMsgs, i); \
+ tDebug("msg found, %" PRIu64 "", ahandle); \
} \
} while (0)
#define CONN_GET_NEXT_SENDMSG(conn) \
@@ -470,8 +466,8 @@ void* createConnPool(int size) {
void* destroyConnPool(void* pool) {
SConnList* connList = taosHashIterate((SHashObj*)pool, NULL);
while (connList != NULL) {
- while (!QUEUE_IS_EMPTY(&connList->conn)) {
- queue* h = QUEUE_HEAD(&connList->conn);
+ while (!QUEUE_IS_EMPTY(&connList->conns)) {
+ queue* h = QUEUE_HEAD(&connList->conns);
SCliConn* c = QUEUE_DATA(h, SCliConn, q);
cliDestroyConn(c, true);
}
@@ -484,21 +480,21 @@ void* destroyConnPool(void* pool) {
static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port) {
char key[32] = {0};
CONN_CONSTRUCT_HASH_KEY(key, ip, port);
- SHashObj* pPool = pool;
- SConnList* plist = taosHashGet(pPool, key, strlen(key));
+
+ SConnList* plist = taosHashGet((SHashObj*)pool, key, strlen(key));
if (plist == NULL) {
SConnList list = {0};
- taosHashPut(pPool, key, strlen(key), (void*)&list, sizeof(list));
- plist = taosHashGet(pPool, key, strlen(key));
- QUEUE_INIT(&plist->conn);
+ taosHashPut((SHashObj*)pool, key, strlen(key), (void*)&list, sizeof(list));
+ plist = taosHashGet((SHashObj*)pool, key, strlen(key));
+ QUEUE_INIT(&plist->conns);
}
- if (QUEUE_IS_EMPTY(&plist->conn)) {
+ if (QUEUE_IS_EMPTY(&plist->conns)) {
return NULL;
}
plist->size -= 1;
- queue* h = QUEUE_HEAD(&plist->conn);
+ queue* h = QUEUE_HEAD(&plist->conns);
SCliConn* conn = QUEUE_DATA(h, SCliConn, q);
conn->status = ConnNormal;
QUEUE_REMOVE(&conn->q);
@@ -514,22 +510,21 @@ static void addConnToPool(void* pool, SCliConn* conn) {
if (conn->status == ConnInPool) {
return;
}
- SCliThrd* thrd = conn->hostThrd;
- CONN_HANDLE_THREAD_QUIT(thrd);
-
allocConnRef(conn, true);
+ SCliThrd* thrd = conn->hostThrd;
if (conn->timer != NULL) {
uv_timer_stop(conn->timer);
taosArrayPush(thrd->timerList, &conn->timer);
conn->timer->data = NULL;
conn->timer = NULL;
}
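+  // drop the extra reference, if any, so the pooled conn keeps a single ref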
+ if (T_REF_VAL_GET(conn) > 1) {
+ transUnrefCliHandle(conn);
+ }
+
+ cliDestroyConnMsgs(conn, false);
- STrans* pTransInst = thrd->pTransInst;
- cliReleaseUnfinishedMsg(conn);
- transQueueClear(&conn->cliMsgs);
- transCtxCleanup(&conn->ctx);
conn->status = ConnInPool;
if (conn->list == NULL) {
@@ -540,18 +535,15 @@ static void addConnToPool(void* pool, SCliConn* conn) {
} else {
tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap);
}
- assert(conn->list != NULL);
- QUEUE_INIT(&conn->q);
- QUEUE_PUSH(&conn->list->conn, &conn->q);
+ QUEUE_PUSH(&conn->list->conns, &conn->q);
conn->list->size += 1;
- conn->task = NULL;
- assert(!QUEUE_IS_EMPTY(&conn->list->conn));
-
if (conn->list->size >= 50) {
STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg));
arg->param1 = conn;
arg->param2 = thrd;
+
+ STrans* pTransInst = thrd->pTransInst;
conn->task = transDQSched(thrd->timeoutQueue, doCloseIdleConn, arg, CONN_PERSIST_TIME(pTransInst->idleTime));
}
}
@@ -691,11 +683,10 @@ static void cliDestroy(uv_handle_t* handle) {
transRemoveExHandle(transGetRefMgt(), conn->refId);
taosMemoryFree(conn->ip);
- conn->stream->data = NULL;
taosMemoryFree(conn->stream);
- transCtxCleanup(&conn->ctx);
- cliReleaseUnfinishedMsg(conn);
- transQueueDestroy(&conn->cliMsgs);
+
+ cliDestroyConnMsgs(conn, true);
+
tTrace("%s conn %p destroy successfully", CONN_GET_INST_LABEL(conn), conn);
transReqQueueClear(&conn->wreqQueue);
transDestroyBuffer(&conn->readBuf);
@@ -738,8 +729,6 @@ static void cliSendCb(uv_write_t* req, int status) {
}
void cliSend(SCliConn* pConn) {
- CONN_HANDLE_BROKEN(pConn);
-
assert(!transQueueEmpty(&pConn->cliMsgs));
SCliMsg* pCliMsg = NULL;
@@ -756,8 +745,8 @@ void cliSend(SCliConn* pConn) {
pMsg->pCont = (void*)rpcMallocCont(0);
pMsg->contLen = 0;
}
- int msgLen = transMsgLenFromCont(pMsg->contLen);
+ int msgLen = transMsgLenFromCont(pMsg->contLen);
STransMsgHead* pHead = transHeadFromCont(pMsg->pCont);
pHead->ahandle = pCtx != NULL ? (uint64_t)pCtx->ahandle : 0;
pHead->noResp = REQUEST_NO_RESP(pMsg) ? 1 : 0;
@@ -769,8 +758,6 @@ void cliSend(SCliConn* pConn) {
pHead->traceId = pMsg->info.traceId;
pHead->magicNum = htonl(TRANS_MAGIC_NUM);
- uv_buf_t wb = uv_buf_init((char*)pHead, msgLen);
-
STraceId* trace = &pMsg->info.traceId;
tGDebug("%s conn %p %s is sent to %s, local info %s, len:%d", CONN_GET_INST_LABEL(pConn), pConn,
TMSG_INFO(pHead->msgType), pConn->dst, pConn->src, pMsg->contLen);
@@ -792,6 +779,8 @@ void cliSend(SCliConn* pConn) {
tGTrace("%s conn %p start timer for msg:%s", CONN_GET_INST_LABEL(pConn), pConn, TMSG_INFO(pMsg->msgType));
uv_timer_start((uv_timer_t*)pConn->timer, cliReadTimeoutCb, TRANS_READ_TIMEOUT, 0);
}
+
+ uv_buf_t wb = uv_buf_init((char*)pHead, msgLen);
uv_write_t* req = transReqQueuePush(&pConn->wreqQueue);
uv_write(req, (uv_stream_t*)pConn->stream, &wb, 1, cliSendCb);
return;
@@ -807,7 +796,6 @@ void cliConnCb(uv_connect_t* req, int status) {
cliHandleExcept(pConn);
return;
}
- // int addrlen = sizeof(pConn->addr);
struct sockaddr peername, sockname;
int addrlen = sizeof(peername);
@@ -840,7 +828,7 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) {
int64_t refId = (int64_t)(pMsg->msg.info.handle);
SExHandle* exh = transAcquireExHandle(transGetRefMgt(), refId);
if (exh == NULL) {
- tDebug("%" PRId64 " already release", refId);
+ tDebug("%" PRId64 " already released", refId);
destroyCmsg(pMsg);
return;
}
@@ -856,6 +844,9 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) {
return;
}
cliSend(conn);
+ } else {
+ tError("%s conn %p already released", CONN_GET_INST_LABEL(conn), conn);
+ destroyCmsg(pMsg);
}
}
static void cliHandleUpdate(SCliMsg* pMsg, SCliThrd* pThrd) {
@@ -905,6 +896,27 @@ void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr) {
}
}
}
+
+bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx) {
+ if (code != 0) return false;
+ if (pCtx->retryCnt == 0) return false;
+ if (transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) return false;
+ return true;
+}
+
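+// fill a broken-link response for a msg that can no longer be delivered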
+int32_t cliBuildExceptResp(SCliMsg* pMsg, STransMsg* pResp) {
+ if (pMsg == NULL) return -1;
+
+ memset(pResp, 0, sizeof(STransMsg));
+
+ pResp->code = TSDB_CODE_RPC_BROKEN_LINK;
+ pResp->msgType = pMsg->msg.msgType + 1;
+ pResp->info.ahandle = pMsg->ctx ? pMsg->ctx->ahandle : NULL;
+ pResp->info.traceId = pMsg->msg.info.traceId;
+
+ return 0;
+}
+
void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
STrans* pTransInst = pThrd->pTransInst;
STransConnCtx* pCtx = pMsg->ctx;
@@ -920,13 +932,8 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
SCliConn* conn = cliGetConn(pMsg, pThrd, &ignore);
if (ignore == true) {
// persist conn already release by server
- STransMsg resp = {0};
- resp.code = TSDB_CODE_RPC_BROKEN_LINK;
- resp.msgType = pMsg->msg.msgType + 1;
-
- resp.info.ahandle = pMsg && pMsg->ctx ? pMsg->ctx->ahandle : NULL;
- resp.info.traceId = pMsg->msg.info.traceId;
-
+ STransMsg resp;
+ cliBuildExceptResp(pMsg, &resp);
pTransInst->cfp(pTransInst->parent, &resp, NULL);
destroyCmsg(pMsg);
return;
@@ -991,9 +998,6 @@ static void cliAsyncCb(uv_async_t* handle) {
QUEUE_REMOVE(h);
SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q);
- if (pMsg == NULL) {
- continue;
- }
(*cliAsyncHandle[pMsg->type])(pMsg, pThrd);
count++;
}
@@ -1035,24 +1039,58 @@ static void cliPrepareCb(uv_prepare_t* handle) {
if (thrd->stopMsg != NULL) cliHandleQuit(thrd->stopMsg, thrd);
}
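+// clean up ctx and pending msgs on a conn; destroy frees the msg queue itself, otherwise the queue is only cleared for reuse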
+void cliDestroyConnMsgs(SCliConn* conn, bool destroy) {
+ transCtxCleanup(&conn->ctx);
+ cliReleaseUnfinishedMsg(conn);
+  if (destroy) {
+ transQueueDestroy(&conn->cliMsgs);
+ } else {
+ transQueueClear(&conn->cliMsgs);
+ }
+}
+
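+// hand a broken-link response to the upper layer for every pending msg that still expects a reply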
+void cliIteraConnMsgs(SCliConn* conn) {
+ SCliThrd* pThrd = conn->hostThrd;
+ STrans* pTransInst = pThrd->pTransInst;
+
+ for (int i = 0; i < transQueueSize(&conn->cliMsgs); i++) {
+ SCliMsg* cmsg = transQueueGet(&conn->cliMsgs, i);
+ if (cmsg->type == Release || REQUEST_NO_RESP(&cmsg->msg) || cmsg->msg.msgType == TDMT_SCH_DROP_TASK) {
+ continue;
+ }
+
+ STransMsg resp = {0};
+ if (-1 == cliBuildExceptResp(cmsg, &resp)) {
+ continue;
+ }
+ pTransInst->cfp(pTransInst->parent, &resp, NULL);
+
+ cmsg->ctx->ahandle = NULL;
+ }
+}
bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead) {
if (pHead->release == 1 && (pHead->msgLen) == sizeof(*pHead)) {
uint64_t ahandle = pHead->ahandle;
+    tDebug("recv release request, ahandle:%" PRIu64 "", ahandle);
SCliMsg* pMsg = NULL;
CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle);
+
transClearBuffer(&conn->readBuf);
transFreeMsg(transContFromHead((char*)pHead));
- if (transQueueSize(&conn->cliMsgs) > 0 && ahandle == 0) {
- SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, 0);
- if (cliMsg->type == Release) return true;
+
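+    // with ahandle == 0, a queued Release msg means the release was initiated locally; nothing more to do here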
+ for (int i = 0; ahandle == 0 && i < transQueueSize(&conn->cliMsgs); i++) {
+ SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, i);
+ if (cliMsg->type == Release) {
+ assert(pMsg == NULL);
+ return true;
+ }
}
+
+ cliIteraConnMsgs(conn);
+
tDebug("%s conn %p receive release request, refId:%" PRId64 "", CONN_GET_INST_LABEL(conn), conn, conn->refId);
- if (T_REF_VAL_GET(conn) > 1) {
- transUnrefCliHandle(conn);
- }
destroyCmsg(pMsg);
- cliReleaseUnfinishedMsg(conn);
- transQueueClear(&conn->cliMsgs);
+
addConnToPool(((SCliThrd*)conn->hostThrd)->pool, conn);
return true;
}
diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c
index 207b967923..7007079f87 100644
--- a/source/libs/transport/src/transSvr.c
+++ b/source/libs/transport/src/transSvr.c
@@ -492,7 +492,6 @@ void uvWorkerAsyncCb(uv_async_t* handle) {
// release handle to rpc init
if (msg->type == Quit) {
(*transAsyncHandle[msg->type])(msg, pThrd);
- continue;
} else {
STransMsg transMsg = msg->msg;
@@ -771,7 +770,7 @@ static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName) {
// conn set
QUEUE_INIT(&pThrd->conn);
- pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 1, pThrd, uvWorkerAsyncCb);
+ pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 5, pThrd, uvWorkerAsyncCb);
uv_pipe_connect(&pThrd->connect_req, pThrd->pipe, pipeName, uvOnPipeConnectionCb);
// uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb);
return true;
@@ -907,23 +906,30 @@ static void uvDestroyConn(uv_handle_t* handle) {
}
}
static void uvPipeListenCb(uv_stream_t* handle, int status) {
- ASSERT(status == 0);
+ if (status != 0) {
+    tError("server failed to listen on pipe, status:%d", status);
+ return;
+ }
SServerObj* srv = container_of(handle, SServerObj, pipeListen);
uv_pipe_t* pipe = &(srv->pipe[srv->numOfWorkerReady][0]);
- ASSERT(0 == uv_pipe_init(srv->loop, pipe, 1));
- ASSERT(0 == uv_accept((uv_stream_t*)&srv->pipeListen, (uv_stream_t*)pipe));
- ASSERT(1 == uv_is_readable((uv_stream_t*)pipe));
- ASSERT(1 == uv_is_writable((uv_stream_t*)pipe));
- ASSERT(0 == uv_is_closing((uv_handle_t*)pipe));
+ int ret = uv_pipe_init(srv->loop, pipe, 1);
+ assert(ret == 0);
+
+ ret = uv_accept((uv_stream_t*)&srv->pipeListen, (uv_stream_t*)pipe);
+ assert(ret == 0);
+
+ ret = uv_is_readable((uv_stream_t*)pipe);
+ assert(ret == 1);
+
+ ret = uv_is_writable((uv_stream_t*)pipe);
+ assert(ret == 1);
+
+ ret = uv_is_closing((uv_handle_t*)pipe);
+ assert(ret == 0);
srv->numOfWorkerReady++;
-
- // ASSERT(0 == uv_listen((uv_stream_t*)&ctx.send.tcp, 512, uvOnAcceptCb));
-
- // r = uv_read_start((uv_stream_t*)&ctx.channel, alloc_cb, read_cb);
- // ASSERT(r == 0);
}
void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) {
@@ -938,7 +944,9 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
srv->port = port;
uv_loop_init(srv->loop);
- assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0));
+ int ret = uv_pipe_init(srv->loop, &srv->pipeListen, 0);
+ assert(ret == 0);
+
#ifdef WINDOWS
char pipeName[64];
snprintf(pipeName, sizeof(pipeName), "\\\\?\\pipe\\trans.rpc.%p-" PRIu64, taosSafeRand(), GetCurrentProcessId());
@@ -947,8 +955,11 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
snprintf(pipeName, sizeof(pipeName), "%s%spipe.trans.rpc.%08X-" PRIu64, tsTempDir, TD_DIRSEP, taosSafeRand(),
taosGetSelfPthreadId());
#endif
- assert(0 == uv_pipe_bind(&srv->pipeListen, pipeName));
- assert(0 == uv_listen((uv_stream_t*)&srv->pipeListen, SOMAXCONN, uvPipeListenCb));
+ ret = uv_pipe_bind(&srv->pipeListen, pipeName);
+ assert(ret == 0);
+
+ ret = uv_listen((uv_stream_t*)&srv->pipeListen, SOMAXCONN, uvPipeListenCb);
+ assert(ret == 0);
for (int i = 0; i < srv->numOfThreads; i++) {
SWorkThrd* thrd = (SWorkThrd*)taosMemoryCalloc(1, sizeof(SWorkThrd));
diff --git a/source/util/src/talgo.c b/source/util/src/talgo.c
index 5353cd9bfe..699f0db7a1 100644
--- a/source/util/src/talgo.c
+++ b/source/util/src/talgo.c
@@ -201,6 +201,7 @@ void *taosbsearch(const void *key, const void *base, int32_t nmemb, int32_t size
return (c > 0) ? p : (midx > 0 ? p - size : NULL);
} else {
ASSERT(0);
+ return NULL;
}
}
diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c
index 7032f39744..cbda4e4655 100644
--- a/source/util/src/tcompare.c
+++ b/source/util/src/tcompare.c
@@ -244,6 +244,7 @@ int32_t compareJsonVal(const void *pLeft, const void *pRight) {
return 0;
}else{
assert(0);
+ return 0;
}
}
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 662a3f0c88..3117152af6 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -121,7 +121,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_CONN_KILLED, "Connection killed")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SQL_SYNTAX_ERROR, "Syntax error in SQL")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DB_NOT_SELECTED, "Database not specified or available")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TABLE_NAME, "Table does not exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too long, check maxSQLLength config")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too long")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FILE_EMPTY, "File is empty")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_LINE_SYNTAX_ERROR, "Syntax error in Line")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_META_CACHED, "No table meta cached")
@@ -621,7 +621,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_INVALID_SCHEMA, "Rsma invalid schema
//index
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding")
-TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Invalid index file")
+TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_INVALID_FILE, "Index file is invalid")
//tmq
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message")
diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c
index a2d65d6a54..46203658f1 100644
--- a/source/util/src/tlog.c
+++ b/source/util/src/tlog.c
@@ -97,7 +97,7 @@ int32_t tqDebugFlag = 135;
int32_t fsDebugFlag = 135;
int32_t metaDebugFlag = 135;
int32_t udfDebugFlag = 135;
-int32_t smaDebugFlag = 135;
+int32_t smaDebugFlag = 131;
int32_t idxDebugFlag = 135;
int64_t dbgEmptyW = 0;
@@ -446,7 +446,10 @@ static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *b
}
if (dflag & DEBUG_SCREEN) {
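+    // deliberately ignore write()'s result; losing a screen log line is harmless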
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
write(1, buffer, (uint32_t)len);
+#pragma GCC diagnostic pop
}
}
diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c
index 4d5532b9a6..2767fed937 100644
--- a/source/util/src/tpagedbuf.c
+++ b/source/util/src/tpagedbuf.c
@@ -371,7 +371,7 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
return TSDB_CODE_SUCCESS;
}
-void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId) {
+void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) {
pBuf->statis.getPages += 1;
char* availablePage = NULL;
diff --git a/source/util/src/version.c.in b/source/util/src/version.c.in
index be1a4a4048..cb307b57fc 100644
--- a/source/util/src/version.c.in
+++ b/source/util/src/version.c.in
@@ -1,4 +1,4 @@
-char version[12] = "${TD_VER_NUMBER}";
+char version[64] = "${TD_VER_NUMBER}";
char compatible_version[12] = "${TD_VER_COMPATIBLE}";
char gitinfo[48] = "${TD_VER_GIT}";
char buildinfo[64] = "Built at ${TD_VER_DATE}";
diff --git a/source/util/test/pageBufferTest.cpp b/source/util/test/pageBufferTest.cpp
index eaf198a483..1a057c5875 100644
--- a/source/util/test/pageBufferTest.cpp
+++ b/source/util/test/pageBufferTest.cpp
@@ -18,7 +18,7 @@ void simpleTest() {
int32_t pageId = 0;
int32_t groupId = 0;
- SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
ASSERT_EQ(getTotalBufSize(pBuf), 1024);
@@ -29,26 +29,26 @@ void simpleTest() {
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t == pBufPage1);
- SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage2);
- SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage3);
- SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage4);
releaseBufPage(pBuf, pBufPage2);
- SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage5);
@@ -64,7 +64,7 @@ void writeDownTest() {
int32_t groupId = 0;
int32_t nx = 12345;
- SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
*(int32_t*)(pBufPage->data) = nx;
@@ -73,22 +73,22 @@ void writeDownTest() {
setBufPageDirty(pBufPage, true);
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage1);
ASSERT_TRUE(pageId == 1);
- SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage2);
ASSERT_TRUE(pageId == 2);
- SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage3);
ASSERT_TRUE(pageId == 3);
- SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage4);
ASSERT_TRUE(pageId == 4);
@@ -113,32 +113,32 @@ void recyclePageTest() {
int32_t groupId = 0;
int32_t nx = 12345;
- SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage1);
ASSERT_TRUE(pageId == 1);
- SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage2);
ASSERT_TRUE(pageId == 2);
- SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage3);
ASSERT_TRUE(pageId == 3);
- SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage4);
ASSERT_TRUE(pageId == 4);
releaseBufPage(pBuf, t4);
- SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t5 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t5 == pBufPage5);
ASSERT_TRUE(pageId == 5);
diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py
index a8117ec04c..7071adb3a9 100644
--- a/tests/pytest/tools/taosdumpTest2.py
+++ b/tests/pytest/tools/taosdumpTest2.py
@@ -11,15 +11,19 @@
# -*- coding: utf-8 -*-
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
+import string
+import random
class TDTestCase:
+
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
@@ -47,12 +51,19 @@ class TDTestCase:
return ""
return paths[0]
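+    # return a random ASCII-letter string; used below to fill wide binary columns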
+ def generateString(self, length):
+ chars = string.ascii_uppercase + string.ascii_lowercase
+ v = ""
+ for i in range(length):
+ v += random.choice(chars)
+ return v
+
def run(self):
if not os.path.exists("./taosdumptest/tmp"):
os.makedirs("./taosdumptest/tmp")
else:
- os.system("rm -rf ./taosdumptest/tmp")
- os.makedirs("./taosdumptest/tmp")
+ print("directory exists")
+ os.system("rm -rf ./taosdumptest/tmp/*")
tdSql.prepare()
@@ -76,17 +87,19 @@ class TDTestCase:
tdLog.info("taosdump found in %s" % binPath)
os.system("rm ./taosdumptest/tmp/*.sql")
+ os.system("rm ./taosdumptest/tmp/*.avro*")
+ os.system("rm -rf ./taosdumptest/taosdump.*")
os.system(
- "%s --databases db -o ./taosdumptest/tmp -B 32766 -L 1048576" %
+ "%s --databases db -o ./taosdumptest/tmp " %
binPath)
tdSql.execute("drop database db")
- tdSql.query("select * from information_schema.ins_databases")
+ tdSql.query("show databases")
tdSql.checkRows(2)
- os.system("%s -i ./taosdumptest/tmp" % binPath)
+ os.system("%s -i ./taosdumptest/tmp -y" % binPath)
- tdSql.query("select * from information_schema.ins_databases")
+ tdSql.query("show databases")
tdSql.checkRows(3)
tdSql.checkData(2, 0, 'db')
@@ -105,17 +118,17 @@ class TDTestCase:
"create table stb(ts timestamp, c1 binary(16374), c2 binary(16374), c3 binary(16374)) tags(t1 nchar(256))")
tdSql.execute(
"insert into t1 using stb tags('t1') values(now, '%s', '%s', '%s')" %
- ("16374",
- "16374",
- "16374"))
+ (self.generateString(16374),
+ self.generateString(16374),
+ self.generateString(16374)))
-# sys.exit(0)
os.system("rm ./taosdumptest/tmp/*.sql")
os.system("rm ./taosdumptest/tmp/*.avro*")
+ os.system("rm -rf ./taosdumptest/tmp/taosdump.*")
os.system("%s -D test -o ./taosdumptest/tmp -y" % binPath)
tdSql.execute("drop database test")
- tdSql.query("select * from information_schema.ins_databases")
+ tdSql.query("show databases")
tdSql.checkRows(3)
os.system("%s -i ./taosdumptest/tmp -y" % binPath)
diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py
index 33ef92bf73..9ffebcbdad 100644
--- a/tests/pytest/util/common.py
+++ b/tests/pytest/util/common.py
@@ -199,22 +199,22 @@ class TDCom:
res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0])
return res
- def cleanTb(self, type="taosc"):
+ def cleanTb(self, type="taosc", dbname="db"):
'''
type is taosc or restful
'''
- query_sql = "show stables"
+ query_sql = f"show {dbname}.stables"
res_row_list = tdSql.query(query_sql, True)
stb_list = map(lambda x: x[0], res_row_list)
for stb in stb_list:
if type == "taosc":
- tdSql.execute(f'drop table if exists `{stb}`')
+ tdSql.execute(f'drop table if exists {dbname}.`{stb}`')
if not stb[0].isdigit():
- tdSql.execute(f'drop table if exists {stb}')
+ tdSql.execute(f'drop table if exists {dbname}.{stb}')
elif type == "restful":
- self.restApiPost(f"drop table if exists `{stb}`")
+ self.restApiPost(f"drop table if exists {dbname}.`{stb}`")
if not stb[0].isdigit():
- self.restApiPost(f"drop table if exists {stb}")
+ self.restApiPost(f"drop table if exists {dbname}.{stb}")
def dateToTs(self, datetime_input):
return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index e530695d1e..89b7fe00eb 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -36,9 +36,9 @@ class TDSimClient:
"rpcDebugFlag": "143",
"tmrDebugFlag": "131",
"cDebugFlag": "143",
- "udebugFlag": "143",
- "jnidebugFlag": "143",
- "qdebugFlag": "143",
+ "uDebugFlag": "143",
+ "jniDebugFlag": "143",
+ "qDebugFlag": "143",
"supportVnodes": "1024",
"telemetryReporting": "0",
}
@@ -134,7 +134,6 @@ class TDDnode:
"uDebugFlag": "131",
"sDebugFlag": "143",
"wDebugFlag": "143",
- "qdebugFlag": "143",
"numOfLogLines": "100000000",
"statusInterval": "1",
"supportVnodes": "1024",
@@ -484,7 +483,7 @@ class TDDnode:
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
-
+
onlyKillOnceWindows = 0
while(processID):
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index 753c41e094..b320cf5995 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -102,7 +102,7 @@ class TDSql:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
- raise Exception(repr(e))
+ raise Exception(repr(e))
i+=1
time.sleep(1)
pass
@@ -225,25 +225,21 @@ class TDSql:
# suppose user want to check nanosecond timestamp if a longer data passed
if (len(data) >= 28):
if pd.to_datetime(self.queryResult[row][col]) == pd.to_datetime(data):
- tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
else:
if self.queryResult[row][col] == _parse_datetime(data):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
return
if str(self.queryResult[row][col]) == str(data):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
return
+
elif isinstance(data, float):
if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001:
- tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001:
- tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
@@ -254,21 +250,7 @@ class TDSql:
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
- if data is None:
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- elif isinstance(data, str):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- elif isinstance(data, datetime.date):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- elif isinstance(data, float):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- else:
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
def getData(self, row, col):
self.checkRowCol(row, col)
@@ -307,7 +289,7 @@ class TDSql:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
- raise Exception(repr(e))
+ raise Exception(repr(e))
i+=1
time.sleep(1)
pass
@@ -329,7 +311,7 @@ class TDSql:
tdLog.exit("%s(%d) failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args)
def __check_equal(self, elm, expect_elm):
- if not type(elm) in(list, tuple) and elm == expect_elm:
+ if elm == expect_elm:
return True
if type(elm) in(list, tuple) and type(expect_elm) in(list, tuple):
if len(elm) != len(expect_elm):
diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c
index ada2039460..f39d5e6528 100644
--- a/tests/script/api/batchprepare.c
+++ b/tests/script/api/batchprepare.c
@@ -2598,7 +2598,6 @@ void runAll(TAOS *taos) {
printf("%s Begin\n", gCaseCtrl.caseCatalog);
runCaseList(taos);
-#if 0
strcpy(gCaseCtrl.caseCatalog, "Micro DB precision Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
gCaseCtrl.precision = TIME_PRECISION_MICRO;
@@ -2654,7 +2653,6 @@ void runAll(TAOS *taos) {
gCaseCtrl.bindColNum = 6;
runCaseList(taos);
gCaseCtrl.bindColNum = 0;
-#endif
/*
strcpy(gCaseCtrl.caseCatalog, "Bind Col Type Test");
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 97295d75e0..46bae734ea 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -344,6 +344,7 @@
# --- scalar ----
./test.sh -f tsim/scalar/in.sim
./test.sh -f tsim/scalar/scalar.sim
+./test.sh -f tsim/scalar/filter.sim
# ---- alter ----
./test.sh -f tsim/alter/cached_schema_after_alter.sim
diff --git a/tests/script/tsim/compute/interval.sim b/tests/script/tsim/compute/interval.sim
index 4e7960ac4a..dc11c20ec9 100644
--- a/tests/script/tsim/compute/interval.sim
+++ b/tests/script/tsim/compute/interval.sim
@@ -101,7 +101,7 @@ $ms = 1601481600000 + $cc
$cc = 1 * 60000
$ms2 = 1601481600000 - $cc
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $tb where ts <= $ms and ts > $ms2 interval(1m) fill(value,0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $tb where ts <= $ms and ts > $ms2 interval(1m) fill(value,0,0,0,0,0)
print ===> $rows
if $rows < 30 then
print expect greater than 30, actual: $rows
@@ -180,7 +180,7 @@ $ms1 = 1601481600000 + $cc
$cc = 1 * 60000
$ms2 = 1601481600000 - $cc
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $mt where ts <= $ms1 and ts > $ms2 interval(1m) fill(value, 0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $mt where ts <= $ms1 and ts > $ms2 interval(1m) fill(value, 0,0,0,0,0)
if $rows < 30 then
return -1
endi
diff --git a/tests/script/tsim/parser/columnValue_bigint.sim b/tests/script/tsim/parser/columnValue_bigint.sim
index 2cf0151a05..0a024029a5 100644
--- a/tests/script/tsim/parser/columnValue_bigint.sim
+++ b/tests/script/tsim/parser/columnValue_bigint.sim
@@ -373,7 +373,7 @@ sql_error insert into st_bigint_e7 values (now, "123abc")
sql_error insert into st_bigint_e9 values (now, abc)
sql_error insert into st_bigint_e10 values (now, "abc")
sql_error insert into st_bigint_e11 values (now, " ")
-sql insert into st_bigint_e12 values (now, '')
+sql_error insert into st_bigint_e12 values (now, '')
sql_error insert into st_bigint_e13 using mt_bigint tags (033) values (now, 9223372036854775808)
sql insert into st_bigint_e14 using mt_bigint tags (033) values (now, -9223372036854775808)
@@ -386,7 +386,7 @@ sql_error insert into st_bigint_e20 using mt_bigint tags (033) values (now, "123
sql_error insert into st_bigint_e22 using mt_bigint tags (033) values (now, abc)
sql_error insert into st_bigint_e23 using mt_bigint tags (033) values (now, "abc")
sql_error insert into st_bigint_e24 using mt_bigint tags (033) values (now, " ")
-sql insert into st_bigint_e25 using mt_bigint tags (033) values (now, '')
+sql_error insert into st_bigint_e25 using mt_bigint tags (033) values (now, '')
sql_error insert into st_bigint_e13_0 using mt_bigint tags (9223372036854775808) values (now, -033)
sql insert into st_bigint_e14_0 using mt_bigint tags (-9223372036854775808) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_double.sim b/tests/script/tsim/parser/columnValue_double.sim
index da09b77353..bfcf338fac 100644
--- a/tests/script/tsim/parser/columnValue_double.sim
+++ b/tests/script/tsim/parser/columnValue_double.sim
@@ -476,7 +476,7 @@ sql_error insert into st_double_e7 values (now, "123abc")
sql_error insert into st_double_e9 values (now, abc)
sql_error insert into st_double_e10 values (now, "abc")
sql_error insert into st_double_e11 values (now, " ")
-sql insert into st_double_e12 values (now, '')
+sql_error insert into st_double_e12 values (now, '')
sql_error insert into st_double_e13 using mt_double tags (033) values (now, 11.7976931348623157e+308)
sql_error insert into st_double_e14 using mt_double tags (033) values (now, -11.7976931348623157e+308)
@@ -489,7 +489,7 @@ sql_error insert into st_double_e20 using mt_double tags (033) values (now, "123
sql_error insert into st_double_e22 using mt_double tags (033) values (now, abc)
sql_error insert into st_double_e23 using mt_double tags (033) values (now, "abc")
sql_error insert into st_double_e24 using mt_double tags (033) values (now, " ")
-sql insert into st_double_e25_1 using mt_double tags (033) values (now, '')
+sql_error insert into st_double_e25_1 using mt_double tags (033) values (now, '')
sql_error insert into st_double_e13 using mt_double tags (31.7976931348623157e+308) values (now, -033)
sql_error insert into st_double_e14 using mt_double tags (-31.7976931348623157e+308) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_float.sim b/tests/script/tsim/parser/columnValue_float.sim
index 3e20e178c3..b2db7dff2b 100644
--- a/tests/script/tsim/parser/columnValue_float.sim
+++ b/tests/script/tsim/parser/columnValue_float.sim
@@ -506,7 +506,7 @@ sql_error insert into st_float_e7 values (now, "123abc")
sql_error insert into st_float_e9 values (now, abc)
sql_error insert into st_float_e10 values (now, "abc")
sql_error insert into st_float_e11 values (now, " ")
-sql insert into st_float_e12 values (now, '')
+sql_error insert into st_float_e12 values (now, '')
sql_error insert into st_float_e13 using mt_float tags (033) values (now, 3.50282347e+38)
sql_error insert into st_float_e14 using mt_float tags (033) values (now, -3.50282347e+38)
@@ -519,7 +519,7 @@ sql_error insert into st_float_e20 using mt_float tags (033) values (now, "123ab
sql_error insert into st_float_e22 using mt_float tags (033) values (now, abc)
sql_error insert into st_float_e23 using mt_float tags (033) values (now, "abc")
sql_error insert into st_float_e24 using mt_float tags (033) values (now, " ")
-sql insert into st_float_e25_1 using mt_float tags (033) values (now, '')
+sql_error insert into st_float_e25_1 using mt_float tags (033) values (now, '')
sql_error insert into st_float_e13 using mt_float tags (3.50282347e+38) values (now, -033)
sql_error insert into st_float_e14 using mt_float tags (-3.50282347e+38) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_int.sim b/tests/script/tsim/parser/columnValue_int.sim
index 009fbd1ede..4a3b8ebd0b 100644
--- a/tests/script/tsim/parser/columnValue_int.sim
+++ b/tests/script/tsim/parser/columnValue_int.sim
@@ -371,7 +371,7 @@ sql_error insert into st_int_e7 values (now, "123abc")
sql_error insert into st_int_e9 values (now, abc)
sql_error insert into st_int_e10 values (now, "abc")
sql_error insert into st_int_e11 values (now, " ")
-sql insert into st_int_e12 values (now, '')
+sql_error insert into st_int_e12 values (now, '')
sql_error insert into st_int_e13 using mt_int tags (033) values (now, 2147483648)
sql insert into st_int_e14 using mt_int tags (033) values (now, -2147483648)
@@ -384,7 +384,7 @@ sql_error insert into st_int_e20 using mt_int tags (033) values (now, "123abc")
sql_error insert into st_int_e22 using mt_int tags (033) values (now, abc)
sql_error insert into st_int_e23 using mt_int tags (033) values (now, "abc")
sql_error insert into st_int_e24 using mt_int tags (033) values (now, " ")
-sql insert into st_int_e25 using mt_int tags (033) values (now, '')
+sql_error insert into st_int_e25 using mt_int tags (033) values (now, '')
sql_error insert into st_int_e13 using mt_int tags (2147483648) values (now, -033)
sql insert into st_int_e14_1 using mt_int tags (-2147483648) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_smallint.sim b/tests/script/tsim/parser/columnValue_smallint.sim
index 0dcb0d85f4..eb364f3630 100644
--- a/tests/script/tsim/parser/columnValue_smallint.sim
+++ b/tests/script/tsim/parser/columnValue_smallint.sim
@@ -374,7 +374,7 @@ sql_error insert into st_smallint_e7 values (now, "123abc")
sql_error insert into st_smallint_e9 values (now, abc)
sql_error insert into st_smallint_e10 values (now, "abc")
sql_error insert into st_smallint_e11 values (now, " ")
-sql insert into st_smallint_e12 values (now, '')
+sql_error insert into st_smallint_e12 values (now, '')
sql_error insert into st_smallint_e13 using mt_smallint tags (033) values (now, 32768)
sql insert into st_smallint_e14_1 using mt_smallint tags (033) values (now, -32768)
@@ -387,7 +387,7 @@ sql_error insert into st_smallint_e20 using mt_smallint tags (033) values (now,
sql_error insert into st_smallint_e22 using mt_smallint tags (033) values (now, abc)
sql_error insert into st_smallint_e23 using mt_smallint tags (033) values (now, "abc")
sql_error insert into st_smallint_e24 using mt_smallint tags (033) values (now, " ")
-sql insert into st_smallint_e25_1 using mt_smallint tags (033) values (now, '')
+sql_error insert into st_smallint_e25_1 using mt_smallint tags (033) values (now, '')
sql_error insert into st_smallint_e13 using mt_smallint tags (32768) values (now, -033)
sql insert into st_smallint_e14 using mt_smallint tags (-32768) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_tinyint.sim b/tests/script/tsim/parser/columnValue_tinyint.sim
index 62ae4e5228..d7938aa739 100644
--- a/tests/script/tsim/parser/columnValue_tinyint.sim
+++ b/tests/script/tsim/parser/columnValue_tinyint.sim
@@ -372,7 +372,7 @@ sql_error insert into st_tinyint_e7 values (now, "123abc")
sql_error insert into st_tinyint_e9 values (now, abc)
sql_error insert into st_tinyint_e10 values (now, "abc")
sql_error insert into st_tinyint_e11 values (now, " ")
-sql insert into st_tinyint_e12 values (now, '')
+sql_error insert into st_tinyint_e12 values (now, '')
sql_error insert into st_tinyint_e13 using mt_tinyint tags (033) values (now, 128)
sql insert into st_tinyint_e14_1 using mt_tinyint tags (033) values (now, -128)
@@ -385,7 +385,7 @@ sql_error insert into st_tinyint_e20 using mt_tinyint tags (033) values (now, "1
sql_error insert into st_tinyint_e22 using mt_tinyint tags (033) values (now, abc)
sql_error insert into st_tinyint_e23 using mt_tinyint tags (033) values (now, "abc")
sql_error insert into st_tinyint_e24 using mt_tinyint tags (033) values (now, " ")
-sql insert into st_tinyint_e25_2 using mt_tinyint tags (033) values (now, '')
+sql_error insert into st_tinyint_e25_2 using mt_tinyint tags (033) values (now, '')
sql_error insert into st_tinyint_e13 using mt_tinyint tags (128) values (now, -033)
sql insert into st_tinyint_e14 using mt_tinyint tags (-128) values (now, -033)
diff --git a/tests/script/tsim/parser/fill.sim b/tests/script/tsim/parser/fill.sim
index f688d815e7..4892345e12 100644
--- a/tests/script/tsim/parser/fill.sim
+++ b/tests/script/tsim/parser/fill.sim
@@ -47,31 +47,10 @@ $tsu = $tsu + $ts0
## fill syntax test
# number of fill values exceeds number of selected columns
-sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
-if $data11 != 6 then
- return -1
-endi
-if $data12 != 6 then
- return -1
-endi
-if $data13 != 6.00000 then
- return -1
-endi
-if $data14 != 6.000000000 then
- return -1
-endi
+sql_error select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
# number of fill values is smaller than number of selected columns
-sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
-if $data11 != 6 then
- return -1
-endi
-if $data12 != 6 then
- return -1
-endi
-if $data13 != 6.00000 then
- return -1
-endi
+sql_error select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
# unspecified filling method
sql_error select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6)
@@ -182,7 +161,7 @@ endi
# min_with_fill
print min_with_fill
-sql select _wstart, min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6)
if $rows != 9 then
return -1
endi
@@ -216,7 +195,7 @@ endi
# first_with_fill
print first_with_fill
-sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6')
if $rows != 9 then
return -1
endi
@@ -305,7 +284,7 @@ endi
# last_with_fill
print last_with_fill
-sql select _wstart, last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6')
if $rows != 9 then
return -1
endi
@@ -351,7 +330,7 @@ if $data11 != -1 then
endi
# fill_char_values_to_arithmetic_fields
-sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
+sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
# fill_multiple_columns
sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
@@ -376,37 +355,25 @@ endi
# fill_into_nonarithmetic_fieds
print select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-if $data01 != 1 then
- return -1
-endi
-if $data11 != NULL then
- return -1
-endi
+sql_error select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24
# fill values into binary or nchar columns will be set to NULL automatically Note:2018-10-24
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
# fill nonarithmetic values into arithmetic fields
sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
-sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
+sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
print select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
-sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
-if $rows != 9 then
- return -1
-endi
-if $data01 != 1 then
- return -1
-endi
+sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
if $rows != 9 then
@@ -416,13 +383,7 @@ if $data01 != 1 then
return -1
endi
-sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
-if $rows != 9 then
- return -1
-endi
-if $data01 != 1 then
- return -1
-endi
+sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
## linear fill
# feature currently switched off 2018/09/29
@@ -859,8 +820,8 @@ sql insert into tm0 values('2020-1-1 1:3:8', 8);
sql insert into tm0 values('2020-1-1 1:3:9', 9);
sql insert into tm0 values('2020-1-1 1:4:10', 10);
-print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85);
-sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85);
+print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90);
+sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90);
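+# the fill list shrinks from eight values to three, one per projected
+# expression (max-min, last-first, 0-spread); surplus values now appear to be
+# rejected rather than ignored.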
if $rows != 8 then
return -1
endi
@@ -958,14 +919,14 @@ if $data12 != NULL then
return -1
endi
-sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 4:2:15' interval(500a) fill(value, 99,91,90,89,88,87,86,85) ;
+sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 4:2:15' interval(500a) fill(value, 99,91,90) ;
if $rows != 21749 then
print expect 21749, actual: $rows
return -1
endi
-print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85) ;
-sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85) ;
+print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89) ;
+sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89) ;
if $rows != 8 then
return -1
endi
diff --git a/tests/script/tsim/parser/fill_us.sim b/tests/script/tsim/parser/fill_us.sim
index 0a45c02f58..f760ba3577 100644
--- a/tests/script/tsim/parser/fill_us.sim
+++ b/tests/script/tsim/parser/fill_us.sim
@@ -48,32 +48,11 @@ $tsu = $tsu + $ts0
## fill syntax test
# number of fill values exceeds number of selected columns
print select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
-sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
-if $data11 != 6 then
- return -1
-endi
-if $data12 != 6 then
- return -1
-endi
-if $data13 != 6.00000 then
- return -1
-endi
-if $data14 != 6.000000000 then
- return -1
-endi
+sql_error select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
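+# this mirrors the fill.sim change: eight fill values for five selected
+# columns is now a hard parse error, so the per-column value checks that used
+# to follow are removed; the too-few-values case just below errors as well.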
# number of fill values is smaller than number of selected columns
print sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
-sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
-if $data11 != 6 then
- return -1
-endi
-if $data12 != 6 then
- return -1
-endi
-if $data13 != 6.00000 then
- return -1
-endi
+sql_error select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
# unspecified filling method
sql_error select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6)
@@ -185,7 +164,7 @@ endi
# min_with_fill
print min_with_fill
-sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6)
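+# five selected columns now take exactly five fill values; the three surplus
+# values in the old list are dropped.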
if $rows != 9 then
return -1
endi
@@ -219,7 +198,7 @@ endi
# first_with_fill
print first_with_fill
-sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6')
if $rows != 9 then
return -1
endi
@@ -308,7 +287,7 @@ endi
# last_with_fill
print last_with_fill
-sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6')
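+# as in first_with_fill above, the last two fill values are now quoted
+# ('6', '6'), presumably because c7 and c8 are string-typed columns.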
if $rows != 9 then
return -1
endi
@@ -353,7 +332,7 @@ if $data11 != -1 then
endi
# fill_char_values_to_arithmetic_fields
-sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
+sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
# fill_multiple_columns
sql_error select _wstart, sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
@@ -379,34 +358,24 @@ endi
# fill_into_nonarithmetic_fields
-sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-#if $data11 != 20000000 then
-if $data11 != NULL then
- return -1
-endi
+sql_error select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# filling quoted values into a bool column throws an error unless the value is 'true' or 'false' (Note: 2018-10-24)
# values filled into binary or nchar columns are set to NULL automatically (Note: 2018-10-24)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
# fill nonarithmetic values into arithmetic fields
sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
-sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
+sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
-sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
-if $rows != 9 then
- return -1
-endi
-if $data01 != 1 then
- return -1
-endi
+sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
if $rows != 9 then
@@ -416,14 +385,7 @@ if $data01 != 1 then
return -1
endi
-sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
-if $rows != 9 then
- return -1
-endi
-if $data01 != 1 then
- return -1
-endi
-
+sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
## linear fill
# feature currently switched off 2018/09/29
diff --git a/tests/script/tsim/parser/slimit_query.sim b/tests/script/tsim/parser/slimit_query.sim
index 1e04a31099..acf0489d3c 100644
--- a/tests/script/tsim/parser/slimit_query.sim
+++ b/tests/script/tsim/parser/slimit_query.sim
@@ -93,25 +93,25 @@ if $rows != 3 then
endi
### slimit + fill
-sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 5 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 0 offset 0
+sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 5 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 0 offset 0
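+# fill(value, -1, -2) is replaced by fill(linear) throughout this block:
+# two values would no longer satisfy the stricter arity check for the eight
+# selected aggregates, and linear fill keeps the row-count expectations intact.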
if $rows != 0 then
return -1
endi
-sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 0
-print select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 0
+sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 0
+print select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 0
print $rows $data00 $data01 $data02 $data03
if $rows != 8 then
return -1
endi
# desc
-sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 0
+sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 0
if $rows != 8 then
return -1
endi
-sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 598
+sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 598
if $rows != 4 then
return -1
endi
diff --git a/tests/script/tsim/parser/timestamp_query.sim b/tests/script/tsim/parser/timestamp_query.sim
index 6e92dbcb3a..24058cbc84 100644
--- a/tests/script/tsim/parser/timestamp_query.sim
+++ b/tests/script/tsim/parser/timestamp_query.sim
@@ -28,7 +28,7 @@ sql select * from ts_stb0 where ts <> $ts0
##### select from supertable
$tb = $tbPrefix . 0
-sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, -1)
+sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, -1, -1)
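+# two fill values now cover the two aggregate outputs first(c1) and last(c1);
+# the constant expression v apparently needs no fill value of its own.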
$res = $rowNum * 2
$n = $res - 2
print ============>$n
@@ -47,7 +47,7 @@ if $data13 != 598.000000000 then
return -1
endi
-sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, NULL)
+sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, NULL, NULL)
if $data13 != 598.000000000 then
print expect 598.000000000, actual $data03
return -1
diff --git a/tests/script/tsim/scalar/filter.sim b/tests/script/tsim/scalar/filter.sim
new file mode 100644
index 0000000000..9231662278
--- /dev/null
+++ b/tests/script/tsim/scalar/filter.sim
@@ -0,0 +1,38 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ======== step1
+sql drop database if exists db1;
+sql create database db1 vgroups 3;
+sql use db1;
+sql create stable st1 (fts timestamp, fbool bool, ftiny tinyint, fsmall smallint, fint int, fbig bigint, futiny tinyint unsigned, fusmall smallint unsigned, fuint int unsigned, fubig bigint unsigned, ffloat float, fdouble double, fbin binary(10), fnchar nchar(10)) tags(tts timestamp, tbool bool, ttiny tinyint, tsmall smallint, tint int, tbig bigint, tutiny tinyint unsigned, tusmall smallint unsigned, tuint int unsigned, tubig bigint unsigned, tfloat float, tdouble double, tbin binary(10), tnchar nchar(10));
+sql create table tb1 using st1 tags('2022-07-10 16:31:00', true, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql create table tb2 using st1 tags('2022-07-10 16:32:00', false, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql create table tb3 using st1 tags('2022-07-10 16:33:00', true, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+
+sql insert into tb1 values ('2022-07-10 16:31:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql insert into tb1 values ('2022-07-10 16:31:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql insert into tb1 values ('2022-07-10 16:31:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+sql insert into tb1 values ('2022-07-10 16:31:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd');
+sql insert into tb1 values ('2022-07-10 16:31:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e');
+
+sql insert into tb2 values ('2022-07-10 16:32:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql insert into tb2 values ('2022-07-10 16:32:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql insert into tb2 values ('2022-07-10 16:32:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+sql insert into tb2 values ('2022-07-10 16:32:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd');
+sql insert into tb2 values ('2022-07-10 16:32:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e');
+
+sql insert into tb3 values ('2022-07-10 16:33:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql insert into tb3 values ('2022-07-10 16:33:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql insert into tb3 values ('2022-07-10 16:33:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+sql insert into tb3 values ('2022-07-10 16:33:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd');
+sql insert into tb3 values ('2022-07-10 16:33:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e');
+
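+# expected 7 rows, assuming the usual tag vs. column semantics:
+#   tb1 (tag ttiny=1): ttiny > 2 is false, so ftiny < 5 and ftiny > 2 gives rows 3,4 (2 rows)
+#   tb2 (tag ttiny=2): same reduction, rows 3,4 (2 rows)
+#   tb3 (tag ttiny=3): ttiny > 2 holds, so only ftiny > 2 applies, rows 3,4,5 (3 rows)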
+sql select * from st1 where (ttiny > 2 or ftiny < 5) and ftiny > 2;
+if $rows != 7 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_comment.sim b/tests/script/tsim/stable/alter_comment.sim
index beb049985c..7c2d6edfcb 100644
--- a/tests/script/tsim/stable/alter_comment.sim
+++ b/tests/script/tsim/stable/alter_comment.sim
@@ -95,7 +95,7 @@ sql_error alter table db.stb add tag t1 int
sql_error alter table db.stb add tag t2 int
sql_error alter table db.stb add tag t3 int
sql alter table db.stb add tag t4 bigint
-sql alter table db.stb add tag c1 int
+sql alter table db.stb add tag c1 int
sql alter table db.stb add tag t5 binary(12)
sql select * from information_schema.ins_stables where db_name = 'db'
diff --git a/tests/script/tsim/stable/alter_count.sim b/tests/script/tsim/stable/alter_count.sim
index 83ea4b14fa..4a2aeca029 100644
--- a/tests/script/tsim/stable/alter_count.sim
+++ b/tests/script/tsim/stable/alter_count.sim
@@ -5,8 +5,8 @@ print ========= start dnode1 as master
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
-sql create database d1 replica 1 duration 7 keep 50
+print ======== step1
+sql create database d1 replica 1 duration 7 keep 50
sql use d1
sql create table tb (ts timestamp, a int)
sql insert into tb values(now-28d, -28)
@@ -83,7 +83,7 @@ if $data00 != 3 then
endi
print ======== step8
-# sql alter table tb(ts timestamp, a int, b smallint, c tinyint, d int, e bigint, f float, g double, h binary(10) )
+# sql alter table tb(ts timestamp, a int, b smallint, c tinyint, d int, e bigint, f float, g double, h binary(10) )
sql alter table tb add column h binary(10)
sql insert into tb values(now-7d, -7, 18, 0, 0, 0, 0, 0, '0')
sql insert into tb values(now-6d, -6, 19, 1, 1, 1, 1, 1, '1')
@@ -260,4 +260,4 @@ if $data00 != 31 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_import.sim b/tests/script/tsim/stable/alter_import.sim
index b968eb6a12..7431ea698a 100644
--- a/tests/script/tsim/stable/alter_import.sim
+++ b/tests/script/tsim/stable/alter_import.sim
@@ -5,7 +5,7 @@ print ========= start dnode1 as master
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d1 replica 1 duration 7 keep 50
sql use d1
sql create table tb (ts timestamp, a int)
@@ -42,4 +42,4 @@ if $data00 != 6 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_insert1.sim b/tests/script/tsim/stable/alter_insert1.sim
index bcea0b48c4..0e5617e92d 100644
--- a/tests/script/tsim/stable/alter_insert1.sim
+++ b/tests/script/tsim/stable/alter_insert1.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d3
sql use d3
sql create table tb (ts timestamp, a int)
@@ -1137,4 +1137,4 @@ if $data79 != null then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_insert2.sim b/tests/script/tsim/stable/alter_insert2.sim
index faae24d32a..a6046f3dda 100644
--- a/tests/script/tsim/stable/alter_insert2.sim
+++ b/tests/script/tsim/stable/alter_insert2.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d4
sql use d4
sql create table tb (ts timestamp, a int, b smallint, c tinyint, d int, e bigint, f float, g double, h binary(10))
@@ -662,4 +662,4 @@ if $data62 != null then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_metrics.sim b/tests/script/tsim/stable/alter_metrics.sim
index e32250de13..203f41e18b 100644
--- a/tests/script/tsim/stable/alter_metrics.sim
+++ b/tests/script/tsim/stable/alter_metrics.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d2
sql use d2
sql create table mt (ts timestamp, a int) TAGS (t int)
@@ -757,8 +757,8 @@ endi
print ======= over
sql drop database d2
sql select * from information_schema.ins_databases
-if $rows != 2 then
+if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/column_add.sim b/tests/script/tsim/stable/column_add.sim
index c0f3b4f490..05189f6c7d 100644
--- a/tests/script/tsim/stable/column_add.sim
+++ b/tests/script/tsim/stable/column_add.sim
@@ -116,7 +116,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != NULL then
return -1
endi
@@ -153,7 +153,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != NULL then
return -1
endi
@@ -299,4 +299,4 @@ if $rows != 10 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/column_modify.sim b/tests/script/tsim/stable/column_modify.sim
index e2752ccf95..43284ba829 100644
--- a/tests/script/tsim/stable/column_modify.sim
+++ b/tests/script/tsim/stable/column_modify.sim
@@ -31,7 +31,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -92,7 +92,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -106,4 +106,4 @@ if $data[1][3] != 101 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/disk.sim b/tests/script/tsim/stable/disk.sim
index e0e51b2625..8edd0a845e 100644
--- a/tests/script/tsim/stable/disk.sim
+++ b/tests/script/tsim/stable/disk.sim
@@ -188,4 +188,4 @@ if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/refcount.sim b/tests/script/tsim/stable/refcount.sim
index a83c0ca53f..35d8767efd 100644
--- a/tests/script/tsim/stable/refcount.sim
+++ b/tests/script/tsim/stable/refcount.sim
@@ -123,4 +123,4 @@ if $rows != 2 then
endi
print =============== step6
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_add.sim b/tests/script/tsim/stable/tag_add.sim
index 7ee9aee974..4f5f0e7452 100644
--- a/tests/script/tsim/stable/tag_add.sim
+++ b/tests/script/tsim/stable/tag_add.sim
@@ -139,7 +139,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -170,7 +170,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -190,4 +190,4 @@ if $rows != 7 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_drop.sim b/tests/script/tsim/stable/tag_drop.sim
index 7902358817..b457bf195b 100644
--- a/tests/script/tsim/stable/tag_drop.sim
+++ b/tests/script/tsim/stable/tag_drop.sim
@@ -165,7 +165,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -196,7 +196,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 201 then
return -1
endi
@@ -229,7 +229,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 201 then
return -1
endi
@@ -261,7 +261,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 301 then
return -1
endi
@@ -323,7 +323,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 302 then
return -1
endi
@@ -334,4 +334,4 @@ if $data[0][5] != 304 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_filter.sim b/tests/script/tsim/stable/tag_filter.sim
index f44142fbbf..de2a87d6c4 100644
--- a/tests/script/tsim/stable/tag_filter.sim
+++ b/tests/script/tsim/stable/tag_filter.sim
@@ -27,47 +27,47 @@ sql create table db.ctb6 using db.stb tags(6, "102")
sql insert into db.ctb6 values(now, 6, "2")
sql select * from db.stb where t1 = 1
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-sql select * from db.stb where t1 < 1
-if $rows != 0 then
+sql select * from db.stb where t1 < 1
+if $rows != 0 then
return -1
endi
-sql select * from db.stb where t1 < 2
-if $rows != 1 then
+sql select * from db.stb where t1 < 2
+if $rows != 1 then
return -1
endi
-sql select * from db.stb where t1 <= 2
-if $rows != 2 then
+sql select * from db.stb where t1 <= 2
+if $rows != 2 then
return -1
endi
-sql select * from db.stb where t1 >= 1
-if $rows != 6 then
+sql select * from db.stb where t1 >= 1
+if $rows != 6 then
return -1
endi
-sql select * from db.stb where t1 > 1
-if $rows != 5 then
+sql select * from db.stb where t1 > 1
+if $rows != 5 then
return -1
endi
-sql select * from db.stb where t1 between 1 and 1
-if $rows != 1 then
+sql select * from db.stb where t1 between 1 and 1
+if $rows != 1 then
return -1
endi
-sql select * from db.stb where t1 between 1 and 6
-if $rows != 6 then
+sql select * from db.stb where t1 between 1 and 6
+if $rows != 6 then
return -1
endi
-sql select * from db.stb where t1 between 1 and 7
-if $rows != 6 then
+sql select * from db.stb where t1 between 1 and 7
+if $rows != 6 then
return -1
endi
@@ -88,25 +88,25 @@ sql insert into db.ctbBin2 values(now, 3, "2")
sql create table db.ctbBin3 using db.stbBin tags("d")
sql insert into db.ctbBin3 values(now, 4, "2")
-sql select * from db.stbBin where t1 = "a"
-if $rows != 1 then
+sql select * from db.stbBin where t1 = "a"
+if $rows != 1 then
return -1
endi
-sql select * from db.stbBin where t1 < "a"
-if $rows != 0 then
+sql select * from db.stbBin where t1 < "a"
+if $rows != 0 then
return -1
endi
-sql select * from db.stbBin where t1 < "b"
-if $rows != 1 then
+sql select * from db.stbBin where t1 < "b"
+if $rows != 1 then
return -1
endi
-sql select * from db.stbBin where t1 between "a" and "e"
-if $rows != 4 then
+sql select * from db.stbBin where t1 between "a" and "e"
+if $rows != 4 then
return -1
endi
@@ -127,25 +127,25 @@ sql insert into db.ctbNc2 values(now, 3, "2")
sql create table db.ctbNc3 using db.stbNc tags("d")
sql insert into db.ctbNc3 values(now, 4, "2")
-sql select * from db.stbNc where t1 = "a"
-if $rows != 1 then
+sql select * from db.stbNc where t1 = "a"
+if $rows != 1 then
return -1
endi
-sql select * from db.stbNc where t1 < "a"
-if $rows != 0 then
+sql select * from db.stbNc where t1 < "a"
+if $rows != 0 then
return -1
endi
-sql select * from db.stbNc where t1 < "b"
-if $rows != 1 then
+sql select * from db.stbNc where t1 < "b"
+if $rows != 1 then
return -1
endi
-sql select * from db.stbNc where t1 between "a" and "e"
-if $rows != 4 then
+sql select * from db.stbNc where t1 between "a" and "e"
+if $rows != 4 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_modify.sim b/tests/script/tsim/stable/tag_modify.sim
index 909ed79359..53e7227d1b 100644
--- a/tests/script/tsim/stable/tag_modify.sim
+++ b/tests/script/tsim/stable/tag_modify.sim
@@ -28,7 +28,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -55,7 +55,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -120,4 +120,4 @@ if $data[4][2] != 5 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_rename.sim b/tests/script/tsim/stable/tag_rename.sim
index 5bdfa24990..c85ed183de 100644
--- a/tests/script/tsim/stable/tag_rename.sim
+++ b/tests/script/tsim/stable/tag_rename.sim
@@ -28,7 +28,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -52,7 +52,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -117,4 +117,4 @@ if $data[4][2] != 4 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/sync/create-mnode.sim b/tests/script/tsim/sync/create-mnode.sim
new file mode 100644
index 0000000000..cfaafc8208
--- /dev/null
+++ b/tests/script/tsim/sync/create-mnode.sim
@@ -0,0 +1,20 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
+
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+
+sql connect
+sql create dnode $hostname port 7200
+sql create dnode $hostname port 7300
+sql create dnode $hostname port 7400
+
+sql create mnode on dnode 2
+sql create mnode on dnode 3
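+
+# dnode1 is deployed with supportVnodes 0, so it carries no vnodes; assuming
+# the first mnode is hosted on dnode1 by default, the two statements above
+# should leave a three-mnode cluster (dnodes 1-3) with dnode4 holding data only.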
diff --git a/tests/script/tsim/user/privilege_sysinfo.sim b/tests/script/tsim/user/privilege_sysinfo.sim
index e8348d92d4..86f95755d0 100644
--- a/tests/script/tsim/user/privilege_sysinfo.sim
+++ b/tests/script/tsim/user/privilege_sysinfo.sim
@@ -163,6 +163,9 @@ sql select * from information_schema.ins_stables
sql select * from information_schema.ins_tables
sql select * from information_schema.ins_tags
sql select * from information_schema.ins_users
+sql select * from information_schema.ins_topics
+sql select * from information_schema.ins_subscriptions
+sql select * from information_schema.ins_streams
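+# topics, subscriptions and streams are now exposed through information_schema;
+# the matching perf_* selects are removed from the performance_schema block below.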
sql_error select * from information_schema.ins_grants
sql_error select * from information_schema.ins_vgroups
sql_error select * from information_schema.ins_configs
@@ -172,11 +175,8 @@ print =============== check performance_schema
sql use performance_schema;
sql select * from performance_schema.perf_connections
sql select * from performance_schema.perf_queries
-sql select * from performance_schema.perf_topics
sql select * from performance_schema.perf_consumers
-sql select * from performance_schema.perf_subscriptions
sql select * from performance_schema.perf_trans
-sql select * from performance_schema.perf_streams
sql select * from performance_schema.perf_apps
#system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/valgrind/checkError6.sim b/tests/script/tsim/valgrind/checkError6.sim
index fcc5b04c90..d85a1bebc8 100644
--- a/tests/script/tsim/valgrind/checkError6.sim
+++ b/tests/script/tsim/valgrind/checkError6.sim
@@ -67,17 +67,17 @@ sql select diff(tbcol) from tb1 where tbcol > 5 and tbcol < 20 order by ts
sql select first(tbcol), last(tbcol) as b from tb1 where ts <= 1601481840000 interval(1m)
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from tb1 where ts <= 1601481840000 partition by tgcol order by tgcol
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from tb1 where ts <= 1601481840000 partition by tgcol interval(1m)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
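+# five aggregates now require five fill values, consistent with the stricter
+# fill(value, ...) arity enforced in the parser tests above.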
sql select last_row(*) from tb1 where tbcol > 5 and tbcol < 20
sql select _wstart, _wend, _wduration, _qstart, _qend, count(*) from tb1 interval(10s, 2s) sliding(10s)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) order by tgcol desc
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) order by tgcol desc
sql select log(tbcol), abs(tbcol), pow(tbcol, 2), sqrt(tbcol), sin(tbcol), cos(tbcol), tan(tbcol), asin(tbcol), acos(tbcol), atan(tbcol), ceil(tbcol), floor(tbcol), round(tbcol), atan(tbcol) from tb1
sql select length("abcd1234"), char_length("abcd1234=-+*") from tb1
sql select tbcol4, length(tbcol4), lower(tbcol4), upper(tbcol4), ltrim(tbcol4), rtrim(tbcol4), concat(tbcol4, tbcol5), concat_ws('_', tbcol4, tbcol5), substr(tbcol4, 1, 4) from tb1
sql select * from tb1 where tbcol not in (1,2,3,null);
sql select * from tb1 where tbcol + 3 <> null;
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
sql select tbcol5 - tbcol3 from tb1
print =============== step4: stb
@@ -97,8 +97,8 @@ sql select first(tbcol), last(tbcol) as c from stb group by tgcol
sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 and tbcol2 is null partition by tgcol interval(1m)
sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from stb where ts <= 1601481840000 partition by tgcol interval(1m)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 and tgcol = 1 partition by tgcol interval(1m) fill(value, 0) order by tgcol desc
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 and tgcol = 1 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) order by tgcol desc
sql select last_row(tbcol), stddev(tbcol) from stb where tbcol > 5 and tbcol < 20 group by tgcol
sql select _wstart, _wend, _wduration, _qstart, _qend, count(*) from stb interval(10s, 2s) sliding(10s)
sql select log(tbcol), abs(tbcol), pow(tbcol, 2), sqrt(tbcol), sin(tbcol), cos(tbcol), tan(tbcol), asin(tbcol), acos(tbcol), atan(tbcol), ceil(tbcol), floor(tbcol), round(tbcol), atan(tbcol) from stb
@@ -108,7 +108,7 @@ sql select * from stb where tbcol not in (1,2,3,null);
sql select * from stb where tbcol + 3 <> null;
sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from stb where tbcol = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol interval(1d)
sql select _wstart, count(*) from tb1 session(ts, 1m)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
sql select tbcol5 - tbcol3 from stb
sql select spread( tbcol2 )/44, spread(tbcol2), 0.204545455 * 44 from stb;
@@ -127,8 +127,8 @@ sql explain analyze select count(*),sum(tbcol) from stb;
sql explain analyze select count(*),sum(tbcol) from stb group by tbcol;
sql explain analyze select * from information_schema.ins_stables;
sql explain analyze verbose true select * from information_schema.ins_stables where db_name='db2';
-sql explain analyze verbose true select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
-sql explain select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql explain analyze verbose true select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
+sql explain select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
print =============== step6: in cast
sql select 1+1n;
diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py
index 25e2378f46..cae4294bc9 100644
--- a/tests/system-test/1-insert/influxdb_line_taosc_insert.py
+++ b/tests/system-test/1-insert/influxdb_line_taosc_insert.py
@@ -31,7 +31,7 @@ if platform.system().lower() == 'windows':
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
+ tdSql.init(conn.cursor(), False)
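+        # SQL statement logging is now hard-coded off (False) rather than
+        # following the logSql argument passed to init.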
self._conn = conn
def createDb(self, name="test", db_update_tag=0):
@@ -357,7 +357,7 @@ class TDTestCase:
"""
normal tags and cols, one for every elm
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
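+        # cleanTb appears to take the target database explicitly now; every
+        # call site in this file passes dbname="test".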
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
@@ -365,7 +365,7 @@ class TDTestCase:
"""
check all normal type
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
for t_type in full_type_list:
input_sql, stb_name = self.genFullTypeSql(c0=t_type, t0=t_type)
@@ -379,7 +379,7 @@ class TDTestCase:
please test :
binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
'''
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
nchar_symbols = f'L{binary_symbols}'
input_sql, stb_name = self.genFullTypeSql(c7=binary_symbols, c8=nchar_symbols, t7=binary_symbols, t8=nchar_symbols)
@@ -390,7 +390,7 @@ class TDTestCase:
test ts list --> ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022"]
# ! when the us-level timestamp digits are all 0, the database query displays them, but the result returned by the python interface does not show .000000; please confirm this case. It passes with the current changes to the time-handling code.
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
ts_list = ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022", 0]
for ts in ts_list:
input_sql, stb_name = self.genFullTypeSql(ts=ts)
@@ -401,7 +401,7 @@ class TDTestCase:
check id.index in tags
eg: t0=**,id=**,t1=**
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True)
self.resCmp(input_sql, stb_name)
@@ -410,7 +410,7 @@ class TDTestCase:
check id param
eg: id and ID
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True)
self.resCmp(input_sql, stb_name)
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True)
@@ -420,7 +420,7 @@ class TDTestCase:
"""
id not exist
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True)
self.resCmp(input_sql, stb_name)
query_sql = f"select tbname from {stb_name}"
@@ -436,10 +436,10 @@ class TDTestCase:
max col count is ??
"""
for input_sql in [self.genLongSql(127, 1)[0], self.genLongSql(1, 4093)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
for input_sql in [self.genLongSql(129, 1)[0], self.genLongSql(1, 4095)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
except SchemalessError as err:
@@ -450,7 +450,7 @@ class TDTestCase:
test illegal id name
mix "~!@#$¥%^&*()-+|[]、「」【】;:《》<>?"
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
rstr = list("~!@#$¥%^&*()-+|[]、「」【】;:《》<>?")
for i in rstr:
stb_name=f"aaa{i}bbb"
@@ -462,7 +462,7 @@ class TDTestCase:
"""
id is start with num
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(tb_name=f"\"1aaabbb\"")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -473,7 +473,7 @@ class TDTestCase:
"""
check now unsupported
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="now")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -484,7 +484,7 @@ class TDTestCase:
"""
check date format ts unsupported
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -495,7 +495,7 @@ class TDTestCase:
"""
check ts format like 16260068336390us19
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="16260068336390us19")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -506,7 +506,7 @@ class TDTestCase:
"""
check full type tag value limit
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for t1 in ["-128i8", "127i8"]:
input_sql, stb_name = self.genFullTypeSql(t1=t1)
@@ -602,7 +602,7 @@ class TDTestCase:
"""
check full type col value limit
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for c1 in ["-128i8", "127i8"]:
input_sql, stb_name = self.genFullTypeSql(c1=c1)
@@ -699,7 +699,7 @@ class TDTestCase:
"""
test illegal tag col value
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# bool
for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
input_sql1 = self.genFullTypeSql(t0=i)[0]
@@ -758,7 +758,7 @@ class TDTestCase:
"""
check duplicate Id Tag Col
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql_id = self.genFullTypeSql(id_double_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -792,7 +792,7 @@ class TDTestCase:
"""
case no id when stb exist
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", c0="f")
self.resCmp(input_sql, stb_name)
input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", c0="f")
@@ -805,7 +805,7 @@ class TDTestCase:
"""
check duplicate insert when stb exist
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -816,7 +816,7 @@ class TDTestCase:
"""
check length increase
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
tb_name = tdCom.getLongName(5, "letters")
@@ -833,7 +833,7 @@ class TDTestCase:
* col is added without value when update==0
* col is added with value when update==1
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
for db_update_tag in [0, 1]:
if db_update_tag == 1 :
@@ -850,7 +850,7 @@ class TDTestCase:
"""
check column and tag count add
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", c0="f")
self.resCmp(input_sql, stb_name)
@@ -866,7 +866,7 @@ class TDTestCase:
condition: stb does not change
insert two tables, keep tags unchanged, change cols
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(t0="f", c0="f", id_noexist_tag=True)
self.resCmp(input_sql, stb_name)
tb_name1 = self.getNoIdTbName(stb_name)
@@ -888,7 +888,7 @@ class TDTestCase:
"""
every binary and nchar must be length+2
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000'
@@ -928,7 +928,7 @@ class TDTestCase:
"""
check nchar length limit
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000'
@@ -963,7 +963,7 @@ class TDTestCase:
"""
test batch insert
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
# tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
@@ -982,7 +982,7 @@ class TDTestCase:
"""
test multi insert
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
sql_list = []
stb_name = tdCom.getLongName(8, "letters")
# tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
@@ -996,7 +996,7 @@ class TDTestCase:
"""
test batch error insert
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
f"{stb_name},t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns"]
@@ -1068,7 +1068,7 @@ class TDTestCase:
"""
thread input different stb
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genSqlList()[0]
self.multiThreadRun(self.genMultiThreadSeq(input_sql))
tdSql.query(f"show tables;")
@@ -1078,7 +1078,7 @@ class TDTestCase:
"""
thread input same stb tb, different data, result keep first data
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1095,7 +1095,7 @@ class TDTestCase:
"""
thread input same stb tb, different data, add columns and tags, result keep first data
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1112,7 +1112,7 @@ class TDTestCase:
"""
thread input same stb tb, different data, minus columns and tags, result keep first data
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1129,7 +1129,7 @@ class TDTestCase:
"""
thread input same stb, different tb, different data
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4]
@@ -1144,7 +1144,7 @@ class TDTestCase:
"""
thread input same stb, different tb, different data, add col, mul tag
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[5]
@@ -1159,7 +1159,7 @@ class TDTestCase:
"""
thread input same stb, different tb, different data, add tag, mul col
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_a_tag_m_col_list = self.genSqlList(stb_name=stb_name)[6]
@@ -1171,7 +1171,7 @@ class TDTestCase:
"""
thread input same stb tb, different ts
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1186,7 +1186,7 @@ class TDTestCase:
"""
thread input same stb tb, different ts, add col, mul tag
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1205,7 +1205,7 @@ class TDTestCase:
"""
thread input same stb tb, different ts, add tag, mul col
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1226,7 +1226,7 @@ class TDTestCase:
"""
thread input same stb, different tb, data, ts
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10]
@@ -1241,7 +1241,7 @@ class TDTestCase:
"""
thread input same stb, different tb, data, ts, add col, mul tag
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_d_ts_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[11]
diff --git a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
index 003abe9d10..3b01784000 100644
--- a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
+++ b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
@@ -459,7 +459,7 @@ class TDTestCase:
normal tags and cols, one for every elm
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(value_type=value_type)
self.resCmp(input_json, stb_name)
@@ -468,7 +468,7 @@ class TDTestCase:
check all normal type
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
for t_type in full_type_list:
input_json_list = [self.genFullTypeJson(tag_value=self.genTagValue(t0_value=t_type))[0],
@@ -489,7 +489,7 @@ class TDTestCase:
binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
nchar_symbols = binary_symbols
input_sql1, stb_name1 = self.genFullTypeJson(col_value=self.genTsColValue(value=binary_symbols, t_type="binary", value_type=value_type),
@@ -505,7 +505,7 @@ class TDTestCase:
# ! when the us-level timestamp digits are all 0, the database query displays them, but the result returned by the python interface does not show .000000; please confirm this case. It passes with the current changes to the time-handling code.
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
ts_list = ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006834", 0]
for ts in ts_list:
if "s" in str(ts):
@@ -571,7 +571,7 @@ class TDTestCase:
eg: t0=**,id=**,t1=**
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(id_change_tag=True, value_type=value_type)
self.resCmp(input_json, stb_name)
@@ -581,7 +581,7 @@ class TDTestCase:
eg: id and ID
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(id_upper_tag=True, value_type=value_type)
self.resCmp(input_json, stb_name)
input_json, stb_name = self.genFullTypeJson(id_mixul_tag=True, value_type=value_type)
@@ -594,7 +594,7 @@ class TDTestCase:
id not exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(id_noexist_tag=True, value_type=value_type)
self.resCmp(input_json, stb_name)
query_sql = f"select tbname from {stb_name}"
@@ -610,10 +610,10 @@ class TDTestCase:
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
for input_json in [self.genLongJson(128, value_type)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
for input_json in [self.genLongJson(129, value_type)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
except SchemalessError as err:
@@ -625,7 +625,7 @@ class TDTestCase:
mix "`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
rstr = list("`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?")
for i in rstr:
input_json = self.genFullTypeJson(tb_name=f'aa{i}bb', value_type=value_type)[0]
@@ -639,7 +639,7 @@ class TDTestCase:
id is start with num
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(tb_name="1aaabbb", value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -651,7 +651,7 @@ class TDTestCase:
check now unsupported
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="now", t_type="ns", value_type=value_type))[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -663,7 +663,7 @@ class TDTestCase:
check date format ts unsupported
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="2021-07-21\ 19:01:46.920", t_type="ns", value_type=value_type))[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -675,7 +675,7 @@ class TDTestCase:
check ts format like 16260068336390us19
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="16260068336390us19", t_type="us", value_type=value_type))[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -690,7 +690,7 @@ class TDTestCase:
length of stb_name tb_name <= 192
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tdSql.execute('reset query cache')
stb_name_192 = tdCom.getLongName(len=192, mode="letters")
tb_name_192 = tdCom.getLongName(len=192, mode="letters")
@@ -715,7 +715,7 @@ class TDTestCase:
check tag name limit <= 62
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tag_name = tdCom.getLongName(61, "letters")
tag_name = f't{tag_name}'
stb_name = tdCom.getLongName(7, "letters")
@@ -733,7 +733,7 @@ class TDTestCase:
check full type tag value limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for t1 in [-127, 127]:
input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t1_value=t1, value_type=value_type))
@@ -854,12 +854,12 @@ class TDTestCase:
check full type col value limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for value in [-128, 127]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint", value_type=value_type))
self.resCmp(input_json, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-129, 128]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint"))[0]
try:
@@ -868,11 +868,11 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
# i16
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-32768]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint", value_type=value_type))
self.resCmp(input_json, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-32769, 32768]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint"))[0]
try:
@@ -882,11 +882,11 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# i32
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-2147483648]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int", value_type=value_type))
self.resCmp(input_json, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-2147483649, 2147483648]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int"))[0]
try:
@@ -896,12 +896,12 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# i64
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-9223372036854775808]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint", value_type=value_type))
self.resCmp(input_json, stb_name)
# ! bug
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# for value in [-9223372036854775809, 9223372036854775808]:
# print(value)
# input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint"))[0]
@@ -913,12 +913,12 @@ class TDTestCase:
# tdSql.checkNotEqual(err.errno, 0)
# f32
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-3.4028234663852885981170418348451692544*(10**38), 3.4028234663852885981170418348451692544*(10**38)]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float", value_type=value_type))
self.resCmp(input_json, stb_name)
# * limit set to 4028234664*(10**38)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-3.4028234664*(10**38), 3.4028234664*(10**38)]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float"))[0]
try:
@@ -928,12 +928,12 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# f64
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308), -1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type))
self.resCmp(input_json, stb_name)
# * limit set to 1.797693134862316*(10**308)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-1.797693134862316*(10**308), -1.797693134862316*(10**308)]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type))[0]
try:
@@ -944,12 +944,12 @@ class TDTestCase:
# if value_type == "obj":
# # binary
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16374, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16375, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
# try:
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -959,12 +959,12 @@ class TDTestCase:
# # nchar
# # * legal nchar could not be larger than 16374/4
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4093, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4094, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
# try:
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -973,14 +973,14 @@ class TDTestCase:
# tdSql.checkNotEqual(err.errno, 0)
# elif value_type == "default":
# # binary
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
# input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16374, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
# elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
# input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(4093, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
# input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16375, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
# elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
@@ -997,7 +997,7 @@ class TDTestCase:
test illegal tag col value
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# bool
for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
try:
@@ -1046,7 +1046,7 @@ class TDTestCase:
check duplicate Id Tag Col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(id_double_tag=True, value_type=value_type)[0]
print(input_json)
try:
@@ -1068,7 +1068,7 @@ class TDTestCase:
case no id when stb exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(tb_name="sub_table_0123456", col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
self.resCmp(input_json, stb_name)
input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, id_noexist_tag=True, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
@@ -1081,7 +1081,7 @@ class TDTestCase:
check duplicate insert when stb exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(value_type=value_type)
self.resCmp(input_json, stb_name)
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1091,7 +1091,7 @@ class TDTestCase:
"""
check length increase
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(value_type=value_type)
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
self.resCmp(input_json, stb_name)
@@ -1105,7 +1105,7 @@ class TDTestCase:
check length increase
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = "test_crash"
input_json = self.genFullTypeJson(stb_name=stb_name)[0]
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1128,7 +1128,7 @@ class TDTestCase:
* col is added with value when update==1
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
for db_update_tag in [0, 1]:
if db_update_tag == 1 :
@@ -1154,7 +1154,7 @@ class TDTestCase:
check tag count add
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
self.resCmp(input_json, stb_name)
@@ -1171,7 +1171,7 @@ class TDTestCase:
insert two table, keep tag unchange, change col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), id_noexist_tag=True)
self.resCmp(input_json, stb_name)
tb_name1 = self.getNoIdTbName(stb_name)
@@ -1194,7 +1194,7 @@ class TDTestCase:
every binary and nchar must be length+2
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
tag_value = {"t0": {"value": True, "type": "bool"}}
@@ -1240,7 +1240,7 @@ class TDTestCase:
check nchar length limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
tag_value = {"t0": True}
@@ -1284,7 +1284,7 @@ class TDTestCase:
test batch insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = "stb_name"
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": 1, "type": "bigint"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}},
@@ -1319,7 +1319,7 @@ class TDTestCase:
test multi insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
sql_list = list()
stb_name = tdCom.getLongName(8, "letters")
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
@@ -1335,7 +1335,7 @@ class TDTestCase:
test batch error insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": "tt", "type": "bool"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}},
{"metric": "st123456", "timestamp": {"value": 1626006933641000000, "type": "ns"}, "value": {"value": 9, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}]
try:
@@ -1349,7 +1349,7 @@ class TDTestCase:
test multi cols insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(c_multi_tag=True, value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1362,7 +1362,7 @@ class TDTestCase:
test blank col insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(c_blank_tag=True, value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1375,7 +1375,7 @@ class TDTestCase:
test blank tag insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(t_blank_tag=True, value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1388,7 +1388,7 @@ class TDTestCase:
check nchar ---> chinese
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(chinese_tag=True)
self.resCmp(input_json, stb_name)
@@ -1397,7 +1397,7 @@ class TDTestCase:
multi_field
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(multi_field_tag=True, value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1407,7 +1407,7 @@ class TDTestCase:
def spellCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
input_json_list = [{"metric": f'{stb_name}_1', "timestamp": {"value": 1626006833639000000, "type": "Ns"}, "value": {"value": 1, "type": "Bigint"}, "tags": {"t1": {"value": 127, "type": "tinYint"}}},
{"metric": f'{stb_name}_2', "timestamp": {"value": 1626006833639000001, "type": "nS"}, "value": {"value": 32767, "type": "smallInt"}, "tags": {"t1": {"value": 32767, "type": "smallInt"}}},
@@ -1426,7 +1426,7 @@ class TDTestCase:
def tbnameTagsColsNameCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = {'metric': 'rFa$sta', 'timestamp': {'value': 1626006834, 'type': 's'}, 'value': {'value': True, 'type': 'bool'}, 'tags': {'Tt!0': {'value': False, 'type': 'bool'}, 'tT@1': {'value': 127, 'type': 'tinyint'}, 't@2': {'value': 32767, 'type': 'smallint'}, 't$3': {'value': 2147483647, 'type': 'int'}, 't%4': {'value': 9223372036854775807, 'type': 'bigint'}, 't^5': {'value': 11.12345027923584, 'type': 'float'}, 't&6': {'value': 22.123456789, 'type': 'double'}, 't*7': {'value': 'binaryTagValue', 'type': 'binary'}, 't!@#$%^&*()_+[];:<>?,9': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': 'rFas$ta_1'}}
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
query_sql = 'select * from `rFa$sta`'
@@ -1441,7 +1441,7 @@ class TDTestCase:
metric value "." trans to "_"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(point_trans_tag=True, value_type=value_type)[0]
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
tdSql.execute("drop table `.point.trans.test`")
@@ -1509,7 +1509,7 @@ class TDTestCase:
thread input different stb
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genSqlList(value_type=value_type)[0]
self.multiThreadRun(self.genMultiThreadSeq(input_json))
tdSql.query(f"show tables;")
@@ -1520,7 +1520,7 @@ class TDTestCase:
thread input same stb tb, different data, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
@@ -1538,7 +1538,7 @@ class TDTestCase:
thread input same stb tb, different data, add columes and tags, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
@@ -1556,7 +1556,7 @@ class TDTestCase:
thread input same stb tb, different data, minus columes and tags, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
@@ -1574,7 +1574,7 @@ class TDTestCase:
thread input same stb, different tb, different data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
s_stb_d_tb_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[4]
@@ -1587,7 +1587,7 @@ class TDTestCase:
thread input same stb, different tb, different data, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
s_stb_d_tb_m_tag_list = [({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "omfdhyom", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'),
@@ -1605,7 +1605,7 @@ class TDTestCase:
thread input same stb, different tb, different data, add tag, mul col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[6]
@@ -1618,7 +1618,7 @@ class TDTestCase:
thread input same stb tb, different ts
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
@@ -1638,7 +1638,7 @@ class TDTestCase:
thread input same stb tb, different ts, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
@@ -1660,7 +1660,7 @@ class TDTestCase:
thread input same stb tb, different ts, add tag, mul col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
@@ -1683,7 +1683,7 @@ class TDTestCase:
thread input same stb, different tb, data, ts
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[10]
@@ -1696,7 +1696,7 @@ class TDTestCase:
thread input same stb, different tb, data, ts, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
s_stb_d_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
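For context before the next file: every JSON case above funnels through the same call shape, `schemaless_insert([json.dumps(payload)], TDSmlProtocolType.JSON.value, None)`. Below is a minimal standalone sketch of one such insert, mirroring the payload literals in the batch-insert case; the open connection object and the suite's `TDSmlProtocolType` enum are assumptions taken from the surrounding test framework, not new API.

```python
import json

# Payload shape used throughout the cases above: "metric" names the
# supertable, "timestamp"/"value" carry typed column data (ns precision
# here), and "tags" become the tag columns of the auto-created child table.
input_json = {
    "metric": "st123456",
    "timestamp": {"value": 1626006833639000000, "type": "ns"},
    "value": {"value": 1, "type": "bigint"},
    "tags": {"t1": {"value": 3, "type": "bigint"}},
}

# conn and TDSmlProtocolType are assumed from the surrounding suite.
conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
```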
diff --git a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
index 3c47a65746..209cfb724e 100644
--- a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
+++ b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
@@ -30,7 +30,7 @@ if platform.system().lower() == 'windows':
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
+ tdSql.init(conn.cursor(), False)
self._conn = conn
self.smlChildTableName_value = "id"
@@ -351,7 +351,7 @@ class TDTestCase:
normal tags and cols, one for every elm
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
@@ -360,7 +360,7 @@ class TDTestCase:
check all normal type
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
for t_type in full_type_list:
input_sql, stb_name = self.genFullTypeSql(t0=t_type, protocol=protocol)
@@ -375,7 +375,7 @@ class TDTestCase:
binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
nchar_symbols = f'L{binary_symbols}'
input_sql1, stb_name1 = self.genFullTypeSql(value=binary_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol)
@@ -388,7 +388,7 @@ class TDTestCase:
test ts list --> ["1626006833640ms", "1626006834s", "1626006822639022"]
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(ts=1626006833640)
self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value)
input_sql, stb_name = self.genFullTypeSql(ts=1626006833640)
@@ -407,7 +407,7 @@ class TDTestCase:
def openTstbTelnetTsCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 0 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
stb_name = input_sql.split(" ")[0]
self.resCmp(input_sql, stb_name, ts=0)
@@ -431,7 +431,7 @@ class TDTestCase:
eg: t0=**,id=**,t1=**
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
@@ -441,7 +441,7 @@ class TDTestCase:
eg: id and ID
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True, protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
input_sql, stb_name = self.genFullTypeSql(id_mixul_tag=True, protocol=protocol)
@@ -454,7 +454,7 @@ class TDTestCase:
id not exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True, protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
query_sql = f"select tbname from {stb_name}"
@@ -470,10 +470,10 @@ class TDTestCase:
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
for input_sql in [self.genLongSql(128)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
for input_sql in [self.genLongSql(129)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
@@ -486,7 +486,7 @@ class TDTestCase:
mix "`~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
rstr = list("~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?")
for i in rstr:
input_sql, stb_name = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"", protocol=protocol)
@@ -498,7 +498,7 @@ class TDTestCase:
id is start with num
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(tb_name="1aaabbb", protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
@@ -507,7 +507,7 @@ class TDTestCase:
check now unsupported
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="now")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -520,7 +520,7 @@ class TDTestCase:
check date format ts unsupported
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -533,7 +533,7 @@ class TDTestCase:
check ts format like 16260068336390us19
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="16260068336390us19")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -551,7 +551,7 @@ class TDTestCase:
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
stb_name_192 = tdCom.getLongName(len=192, mode="letters")
tb_name_192 = tdCom.getLongName(len=192, mode="letters")
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name_192, tb_name=tb_name_192)
self.resCmp(input_sql, stb_name)
tdSql.query(f'select * from {stb_name}')
@@ -581,7 +581,7 @@ class TDTestCase:
check tag name limit <= 62
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tag_name = tdCom.getLongName(61, "letters")
tag_name = f'T{tag_name}'
stb_name = tdCom.getLongName(7, "letters")
@@ -599,7 +599,7 @@ class TDTestCase:
check full type tag value limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# nchar
# * legal nchar could not be larger than 16374/4
stb_name = tdCom.getLongName(7, "letters")
@@ -618,12 +618,12 @@ class TDTestCase:
check full type col value limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for value in ["-128i8", "127i8"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-129i8", "128i8"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -632,11 +632,11 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
# i16
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-32768i16"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-32769i16", "32768i16"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -646,11 +646,11 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# i32
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-2147483648i32"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-2147483649i32", "2147483648i32"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -660,11 +660,11 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# i64
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-9223372036854775808i64"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-9223372036854775809i64", "9223372036854775808i64"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -674,12 +674,12 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# f32
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
# * limit set to 4028234664*(10**38)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -689,12 +689,12 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# f64
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
# # * limit set to 1.797693134862316*(10**308)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# for value in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']:
# input_sql = self.genFullTypeSql(value=value)[0]
# try:
@@ -704,12 +704,12 @@ class TDTestCase:
# tdSql.checkNotEqual(err.errno, 0)
# # # binary
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t'
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t'
# try:
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -719,12 +719,12 @@ class TDTestCase:
# # nchar
# # * legal nchar could not be larger than 16374/4
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4093, "letters")}" t0=t'
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4094, "letters")}" t0=t'
# try:
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -738,7 +738,7 @@ class TDTestCase:
test illegal tag col value
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# bool
for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
input_sql1, stb_name = self.genFullTypeSql(t0=i)
@@ -774,7 +774,7 @@ class TDTestCase:
check blank case
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc aaa" t0=t',
# f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"',
# f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"',
@@ -792,7 +792,7 @@ class TDTestCase:
check duplicate Id Tag Col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql_id = self.genFullTypeSql(id_double_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.TELNET.value, None)
@@ -815,7 +815,7 @@ class TDTestCase:
case no id when stb exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", value="f")
self.resCmp(input_sql, stb_name)
input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", value="f")
@@ -828,7 +828,7 @@ class TDTestCase:
check duplicate insert when stb exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -840,7 +840,7 @@ class TDTestCase:
check length increase
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
tb_name = tdCom.getLongName(5, "letters")
@@ -858,7 +858,7 @@ class TDTestCase:
* col is added with value when update==1
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
for db_update_tag in [0, 1]:
if db_update_tag == 1 :
@@ -885,7 +885,7 @@ class TDTestCase:
check tag count add
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f")
self.resCmp(input_sql, stb_name)
@@ -902,7 +902,7 @@ class TDTestCase:
insert two table, keep tag unchange, change col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(t0="f", value="f", id_noexist_tag=True)
self.resCmp(input_sql, stb_name)
tb_name1 = self.getNoIdTbName(stb_name)
@@ -925,7 +925,7 @@ class TDTestCase:
check nchar length limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
input_sql = f'{stb_name} 1626006833640 f t2={tdCom.getLongName(1, "letters")}'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -949,7 +949,7 @@ class TDTestCase:
test batch insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
@@ -976,7 +976,7 @@ class TDTestCase:
test multi insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
sql_list = []
stb_name = tdCom.getLongName(8, "letters")
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 nchar(10))')
@@ -992,7 +992,7 @@ class TDTestCase:
test batch error insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
lines = ["st123456 1626006833640 3i 64 t1=3i64 t2=4f64 t3=\"t3\"",
f"{stb_name} 1626056811823316532ns tRue t2=5f64 t3=L\"ste\""]
@@ -1007,7 +1007,7 @@ class TDTestCase:
test multi cols insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(c_multi_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1020,7 +1020,7 @@ class TDTestCase:
test blank col insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(c_blank_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1033,7 +1033,7 @@ class TDTestCase:
test blank tag insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(t_blank_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1046,7 +1046,7 @@ class TDTestCase:
check nchar ---> chinese
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(chinese_tag=True)
self.resCmp(input_sql, stb_name)
@@ -1055,7 +1055,7 @@ class TDTestCase:
multi_field
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(multi_field_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1065,7 +1065,7 @@ class TDTestCase:
def spellCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
input_sql_list = [f'{stb_name}_1 1626006833640 127I8 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
f'{stb_name}_2 1626006833640 32767I16 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
@@ -1086,7 +1086,7 @@ class TDTestCase:
metric value "." trans to "_"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(point_trans_tag=True, protocol=protocol)[0]
if protocol == 'telnet-tcp':
stb_name = f'`{input_sql.split(" ")[1]}`'
@@ -1097,7 +1097,7 @@ class TDTestCase:
def defaultTypeCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
input_sql_list = [f'{stb_name}_1 1626006833640 9223372036854775807 t0=f t1=127 t2=32767i16 t3=2147483647i32 t4=9223372036854775807 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
f'{stb_name}_2 1626006833641 22.123456789 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789 t7="vozamcts" t8=L"ncharTagValue"', \
@@ -1110,7 +1110,7 @@ class TDTestCase:
def tbnameTagsColsNameCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
if self.smlChildTableName_value == "ID":
input_sql = 'rFa$sta 1626006834 9223372036854775807 id=rFas$ta_1 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1135,7 +1135,7 @@ class TDTestCase:
stb = "put"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(tcp_keyword_tag=True, protocol=protocol)[0]
stb_name = f'`{input_sql.split(" ")[1]}`'
self.resCmp(input_sql, stb_name, protocol=protocol)
@@ -1204,7 +1204,7 @@ class TDTestCase:
thread input different stb
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genSqlList()[0]
print(input_sql)
self.multiThreadRun(self.genMultiThreadSeq(input_sql))
@@ -1216,7 +1216,7 @@ class TDTestCase:
thread input same stb tb, different data, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1235,7 +1235,7 @@ class TDTestCase:
thread input same stb tb, different data, add columes and tags, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1254,7 +1254,7 @@ class TDTestCase:
thread input same stb tb, different data, minus columes and tags, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1273,7 +1273,7 @@ class TDTestCase:
thread input same stb, different tb, different data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4]
@@ -1286,7 +1286,7 @@ class TDTestCase:
thread input same stb, different tb, different data, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833640 "omfdhyom" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
@@ -1303,7 +1303,7 @@ class TDTestCase:
thread input same stb, different tb, different data, add tag, mul col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name)[6]
@@ -1316,7 +1316,7 @@ class TDTestCase:
thread input same stb tb, different ts
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1336,7 +1336,7 @@ class TDTestCase:
thread input same stb tb, different ts, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1354,7 +1354,7 @@ class TDTestCase:
thread input same stb tb, different ts, add tag, mul col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1377,7 +1377,7 @@ class TDTestCase:
thread input same stb, different tb, data, ts
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10]
@@ -1390,7 +1390,7 @@ class TDTestCase:
thread input same stb, different tb, data, ts, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
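Before moving on to function_diff.py: the telnet cases above all build single-line strings of the form `metric timestamp value tagk=tagv ...`, where suffixes such as `I8`/`I16`/`F32` pin the column type. A minimal sketch under the same assumptions as above (framework connection and protocol enum), reusing a line shape from the spellCheckCase inputs:

```python
# One OpenTSDB telnet line: metric, ms-precision timestamp, typed value,
# then tag key/value pairs with explicit type suffixes.
input_sql = 'stb_demo_1 1626006833640 127I8 t0=127I8 t1=32767I16 t4=11.12345027923584F32'

# conn and TDSmlProtocolType are assumed from the surrounding suite.
conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
```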
diff --git a/tests/system-test/2-query/function_diff.py b/tests/system-test/2-query/function_diff.py
index fd5d6ea1cf..946453bb23 100644
--- a/tests/system-test/2-query/function_diff.py
+++ b/tests/system-test/2-query/function_diff.py
@@ -193,43 +193,38 @@ class TDTestCase:
# case17: only support normal table join
case17 = {
- "col": "t1.c1",
- "table_expr": "t1, t2",
- "condition": "where t1.ts=t2.ts"
+ "col": "table1.c1 ",
+ "table_expr": "db.t1 as table1, db.t2 as table2",
+ "condition": "where table1.ts=table2.ts"
}
self.checkdiff(**case17)
- # case18~19: with group by
- # case18 = {
- # "table_expr": "db.t1",
- # "condition": "group by c6"
- # }
- # self.checkdiff(**case18)
+ # case18~19: with group by , function diff not support group by
+
case19 = {
- "table_expr": "db.stb1",
+ "table_expr": "db.stb1 where tbname =='t0' ",
"condition": "partition by tbname order by tbname" # partition by tbname
}
self.checkdiff(**case19)
- # # case20~21: with order by
- # case20 = {"condition": "order by ts"}
- # self.checkdiff(**case20)
+ # case20~21: with order by , Not a single-group group function
- # # case22: with union
+ # case22: with union
# case22 = {
- # "condition": "union all select diff(c1) from t2"
+ # "condition": "union all select diff(c1) from db.t2 "
# }
# self.checkdiff(**case22)
+ tdSql.query("select count(c1) from db.t1 union all select count(c1) from db.t2")
# case23: with limit/slimit
case23 = {
"condition": "limit 1"
}
self.checkdiff(**case23)
- # case24 = {
- # "table_expr": "db.stb1",
- # "condition": "group by tbname slimit 1 soffset 1"
- # }
- # self.checkdiff(**case24)
+ case24 = {
+ "table_expr": "db.stb1",
+ "condition": "partition by tbname order by tbname slimit 1 soffset 1"
+ }
+ self.checkdiff(**case24)
pass
@@ -284,9 +279,9 @@ class TDTestCase:
tdSql.query(self.diff_query_form(alias=", c2")) # mix with other 1
# tdSql.error(self.diff_query_form(table_expr="db.stb1")) # select stb directly
stb_join = {
- "col": "stb1.c1",
- "table_expr": "stb1, stb2",
- "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
+ "col": "stable1.c1",
+ "table_expr": "db.stb1 as stable1, db.stb2 as stable2",
+ "condition": "where stable1.ts=stable2.ts and stable1.st1=stable2.st2 order by stable1.ts"
}
tdSql.query(self.diff_query_form(**stb_join)) # stb join
interval_sql = {
@@ -315,20 +310,20 @@ class TDTestCase:
for i in range(tbnum):
for j in range(data_row):
tdSql.execute(
- f"insert into t{i} values ("
+ f"insert into db.t{i} values ("
f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
)
tdSql.execute(
- f"insert into t{i} values ("
+ f"insert into db.t{i} values ("
f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
)
tdSql.execute(
- f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+ f"insert into db.tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
)
pass
@@ -349,8 +344,8 @@ class TDTestCase:
"create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
)
for i in range(tbnum):
- tdSql.execute(f"create table t{i} using db.stb1 tags({i})")
- tdSql.execute(f"create table tt{i} using db.stb2 tags({i})")
+ tdSql.execute(f"create table db.t{i} using db.stb1 tags({i})")
+ tdSql.execute(f"create table db.tt{i} using db.stb2 tags({i})")
pass
def diff_support_stable(self):
@@ -398,8 +393,8 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert only NULL test:")
for i in range(tbnum):
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime - 5})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime + 5})")
self.diff_current_query()
self.diff_error_query()
@@ -430,9 +425,9 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert data mix with NULL test:")
for i in range(tbnum):
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
self.diff_current_query()
self.diff_error_query()
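The function_diff.py changes above encode two constraints the patch comments call out: `diff()` does not support GROUP BY (case18 is retired), and per-subtable results instead go through PARTITION BY tbname, including with slimit/soffset in the revived case24. A small sketch of the two query forms, with table names taken from the suite's fixtures:

```python
# Accepted: per-subtable diff via PARTITION BY, with slimit/soffset (case24).
ok_sql = ("select diff(c1) from db.stb1 "
          "partition by tbname order by tbname slimit 1 soffset 1")

# Rejected by diff(): GROUP BY, per the retired case18.
bad_sql = "select diff(c1) from db.stb1 group by c6"
```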
diff --git a/tests/system-test/2-query/join2.py b/tests/system-test/2-query/join2.py
index 5533cb840e..5c8fe0f0f9 100644
--- a/tests/system-test/2-query/join2.py
+++ b/tests/system-test/2-query/join2.py
@@ -52,12 +52,12 @@ class TDTestCase:
return query_condition
- def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
+ def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False, alias_tb1="tb1", alias_tb2="tb2"):
table_reference = tb_list[0]
join_condition = table_reference
join = "inner join" if INNER else "join"
for i in range(len(tb_list[1:])):
- join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
+ join_condition += f" as {alias_tb1} {join} {tb_list[i+1]} as {alias_tb2} on {alias_tb1}.{filter}={alias_tb2}.{filter}"
return join_condition
@@ -123,28 +123,28 @@ class TDTestCase:
sqls = []
__join_tblist = self.__join_tblist
for join_tblist in __join_tblist:
- for join_tb in join_tblist:
- select_claus_list = self.__query_condition(join_tb)
- for select_claus in select_claus_list:
- group_claus = self.__group_condition( col=select_claus)
- where_claus = self.__where_condition( query_conditon=select_claus )
- having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" )
- sqls.extend(
- (
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus),
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist), having_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist)),
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, ),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), having_claus ),
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True) ),
- )
+ alias_tb = "tb1"
+ select_claus_list = self.__query_condition(alias_tb)
+ for select_claus in select_claus_list:
+ group_claus = self.__group_condition( col=select_claus)
+ where_claus = self.__where_condition( query_conditon=select_claus )
+ having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" )
+ sqls.extend(
+ (
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus, having_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), having_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb)),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), where_claus, having_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), where_claus, ),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), having_claus ),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True) ),
)
+ )
return list(filter(None, sqls))
def __join_check(self,):
@@ -341,10 +341,8 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute(f"flush database db")
- tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
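Before the last_row.py hunks: the join2.py change threads explicit aliases through `__join_condition`, so every generated join reads `t1 as tb1 join t2 as tb2 on tb1.ts=tb2.ts` and the select clauses can reference `tb1.*` unambiguously. A standalone rendition of that builder for illustration (`PRIMARY_COL` is assumed to be the suite's `ts` column):

```python
PRIMARY_COL = "ts"  # assumed; the suite's primary-key column

def join_condition(tb_list, filter=PRIMARY_COL, inner=False,
                   alias_tb1="tb1", alias_tb2="tb2"):
    # Mirrors the patched __join_condition: alias both sides of the join
    # and compare the filter column through the aliases.
    join = "inner join" if inner else "join"
    condition = tb_list[0]
    for i in range(len(tb_list[1:])):
        condition += (f" as {alias_tb1} {join} {tb_list[i + 1]} as {alias_tb2}"
                      f" on {alias_tb1}.{filter}={alias_tb2}.{filter}")
    return condition

print(join_condition(["db.ct1", "db.ct2"]))
# -> db.ct1 as tb1 join db.ct2 as tb2 on tb1.ts=tb2.ts
```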
diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py
index f65744a0b7..5d435b068f 100644
--- a/tests/system-test/2-query/last_row.py
+++ b/tests/system-test/2-query/last_row.py
@@ -638,13 +638,13 @@ class TDTestCase:
tdSql.query(f"select ts , last_row(c1) ,c1 from (select ts , c1 ,t1 from {dbname}.stb1)")
tdSql.checkData(0,1,None)
- tdSql.query(f"select ts , last_row(c1) ,c1 from (select ts , max(c1) c1 ,t1 from {dbname}.stb1 where ts >now -1h and ts now -1h and ts now -1h and ts now -1h and ts now -1h and ts now -1h and ts 0:
- elem = math.log(elem)
- elif elem <=0:
+ if base ==1:
elem = None
+ else:
+ if elem == None:
+ elem = None
+ elif elem ==1:
+ elem = 0.0
+ elif elem >0 and elem !=1 :
+ if base==None :
+ elem = math.log(elem )
+ else:
+ print(base , elem)
+ elem = math.log(elem , base)
+ elif elem <=0:
+ elem = None
+
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
+ tdSql.query(log_query)
for row_index , row in enumerate(log_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query )
- sys.exit(1)
- else:
- tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query )
-
- def check_result_auto_log2(self ,origin_query , log_query):
-
- log_result = tdSql.getResult(log_query)
- origin_result = tdSql.getResult(origin_query)
-
- auto_result =[]
-
- for row in origin_result:
- row_check = []
- for elem in row:
- if elem == None:
- elem = None
- elif elem >0:
- elem = math.log(elem,2)
- elif elem <=0:
- elem = None
- row_check.append(elem)
- auto_result.append(row_check)
-
- check_status = True
- for row_index , row in enumerate(log_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query )
- sys.exit(1)
- else:
- tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query )
-
- def check_result_auto_log1(self ,origin_query , log_query):
- log_result = tdSql.getResult(log_query)
- origin_result = tdSql.getResult(origin_query)
-
- auto_result =[]
-
- for row in origin_result:
- row_check = []
- for elem in row:
- if elem == None:
- elem = None
- elif elem >0:
- elem = None
- elif elem <=0:
- elem = None
- row_check.append(elem)
- auto_result.append(row_check)
-
- check_status = True
- for row_index , row in enumerate(log_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query )
- sys.exit(1)
- else:
- tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query )
- def check_result_auto_log__10(self ,origin_query , log_query):
- log_result = tdSql.getResult(log_query)
- origin_result = tdSql.getResult(origin_query)
-
- auto_result =[]
-
- for row in origin_result:
- row_check = []
- for elem in row:
- if elem == None:
- elem = None
- elif elem >0:
- elem = None
- elif elem <=0:
- elem = None
- row_check.append(elem)
- auto_result.append(row_check)
-
- check_status = True
- for row_index , row in enumerate(log_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query )
- sys.exit(1)
- else:
- tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query )
-
+ tdSql.checkData(row_index , col_index ,auto_result[row_index][col_index])
+
def test_errors(self, dbname="db"):
error_sql_lists = [
f"select log from {dbname}.t1",
@@ -328,10 +244,10 @@ class TDTestCase:
tdSql.checkData(3 , 0, 1.098612289)
tdSql.checkData(4 , 0, 1.386294361)
- self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.t1")
- self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,2), log(c2 ,2) ,log(c3, 2), log(c4 ,2), log(c5 ,2) from {dbname}.t1")
- self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,1), log(c2 ,1) ,log(c3, 1), log(c4 ,1), log(c5 ,1) from {dbname}.t1")
- self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,-10), log(c2 ,-10) ,log(c3, -10), log(c4 ,-10), log(c5 ,-10) from {dbname}.t1")
+ self.check_result_auto_log( None , f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.t1")
+ self.check_result_auto_log( 2 , f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,2), log(c2 ,2) ,log(c3, 2), log(c4 ,2), log(c5 ,2) from {dbname}.t1")
+ self.check_result_auto_log( 1, f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,1), log(c2 ,1) ,log(c3, 1), log(c4 ,1), log(c5 ,1) from {dbname}.t1")
+ self.check_result_auto_log( 10 ,f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,10), log(c2 ,10) ,log(c3, 10), log(c4 ,10), log(c5 ,10) from {dbname}.t1")
# used for sub table
tdSql.query(f"select c1 ,log(c1 ,3) from {dbname}.ct1")
@@ -349,9 +265,9 @@ class TDTestCase:
tdSql.checkData(3 , 2, 0.147315235)
tdSql.checkData(4 , 2, None)
- self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.ct1")
- self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) from {dbname}.ct1")
- self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) from {dbname}.ct1")
+ self.check_result_auto_log( None ,f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.ct1")
+ self.check_result_auto_log( 2, f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) from {dbname}.ct1")
+ self.check_result_auto_log( 10 , f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,10), log(c2,10) ,log(c3,10), log(c4,10), log(c5,10) from {dbname}.ct1")
# nest query for log functions
tdSql.query(f"select c1 , log(c1,3) ,log(log(c1,3),3) , log(log(log(c1,3),3),3) from {dbname}.ct1;")
@@ -585,15 +501,15 @@ class TDTestCase:
tdSql.error(
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from {dbname}.sub1_bound")
- self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from {dbname}.sub1_bound")
- self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) ,log(c6,-10) from {dbname}.sub1_bound")
+ self.check_result_auto_log(None , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from {dbname}.sub1_bound")
+ self.check_result_auto_log( 2 , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from {dbname}.sub1_bound")
+ self.check_result_auto_log( 10 , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,10), log(c2,10) ,log(c3,10), log(c4,10), log(c5,10) ,log(c6,10) from {dbname}.sub1_bound")
- self.check_result_auto_log2( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c3,2), log(c2,2) ,log(c1,2) from {dbname}.sub1_bound")
- self.check_result_auto_log( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c3), log(c2) ,log(c1) from {dbname}.sub1_bound")
+ self.check_result_auto_log( 2 , f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c3,2), log(c2,2) ,log(c1,2) from {dbname}.sub1_bound")
+ self.check_result_auto_log( None , f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c3), log(c2) ,log(c1) from {dbname}.sub1_bound")
- self.check_result_auto_log2(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select log(abs(c1) ,2) from {dbname}.sub1_bound" )
+ self.check_result_auto_log(2 , f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select log(abs(c1) ,2) from {dbname}.sub1_bound" )
# check basic elem for table per row
tdSql.query(f"select log(abs(c1),2) ,log(abs(c2),2) , log(abs(c3),2) , log(abs(c4),2), log(abs(c5),2), log(abs(c6),2) from {dbname}.sub1_bound ")
@@ -647,15 +563,15 @@ class TDTestCase:
def support_super_table_test(self, dbname="db"):
- self.check_result_auto_log2( f"select c5 from {dbname}.stb1 order by ts " , f"select log(c5,2) from {dbname}.stb1 order by ts" )
- self.check_result_auto_log2( f"select c5 from {dbname}.stb1 order by tbname " , f"select log(c5,2) from {dbname}.stb1 order by tbname" )
- self.check_result_auto_log2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_log2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log( 2 , f"select c5 from {dbname}.stb1 order by ts " , f"select log(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_log( 2 ,f"select c5 from {dbname}.stb1 order by tbname " , f"select log(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_log( 2 ,f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log( 2 , f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select log(t1,2), log(c5,2) from {dbname}.stb1 order by ts" )
- self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 order by tbname" )
- self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) , log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 order by ts " , f"select log(t1,2), log(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log( 2 ,f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) , log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
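The three near-duplicate checkers deleted above (`check_result_auto_log2`, `check_result_auto_log1`, `check_result_auto_log__10`) collapse into the single base-aware `check_result_auto_log`. Its expected-value rule, distilled into a standalone reference function (a sketch for reading the diff, not the test framework's own code):

    import math

    def expected_log(elem, base=None):
        # NULL input, non-positive input, or base 1 (log base 1 is
        # undefined) all map to NULL, mirroring the merged checker.
        if base == 1 or elem is None:
            return None
        if elem == 1:
            return 0.0            # log(1) is 0 in every base
        if elem <= 0:
            return None
        return math.log(elem) if base is None else math.log(elem, base)

Each expected value is then asserted per cell with `tdSql.checkData(row_index, col_index, ...)`, so a mismatch now reports the exact failing cell instead of exiting with the generic notice the deleted code printed.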
diff --git a/tests/system-test/2-query/lower.py b/tests/system-test/2-query/lower.py
index 0917fb63fc..0e33e3834e 100644
--- a/tests/system-test/2-query/lower.py
+++ b/tests/system-test/2-query/lower.py
@@ -96,16 +96,16 @@ class TDTestCase:
return sqls
- def __test_current(self):
+ def __test_current(self, dbname="db"):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__lower_current_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__lower_err_check(tb):
@@ -113,22 +113,20 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
-
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- ) tags (t1 int)
+ ) tags (tag1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -138,78 +136,78 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
- ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } )
- ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } )
+ f'''insert into {dbname}.ct1 values
+ ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
+ ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
- { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000}
+ { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
- { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000}
+ { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
- { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
- { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
- "binary_{i}", "nchar_{i}", { now_time - 1000 * i } )
+ "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
- "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
- "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
@@ -227,10 +225,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
-
- tdSql.execute("use db")
+ tdSql.execute("flush database db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
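The recurring change in lower.py, as in most files below, is threading a `dbname` parameter through every helper so that each statement names its tables fully. A tiny sketch of the qualification step, using the same defaults this diff uses:

    def qualified(dbname="db", tables=("ct1", "ct2", "ct4", "t1", "stb1")):
        # "db.ct1"-style names keep every query self-contained, so a case
        # no longer depends on an earlier "use db" being in effect.
        return [f"{dbname}.{tb}" for tb in tables]

    # qualified() -> ['db.ct1', 'db.ct2', 'db.ct4', 'db.t1', 'db.stb1']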
diff --git a/tests/system-test/2-query/ltrim.py b/tests/system-test/2-query/ltrim.py
index 15f40a09c3..330f688990 100644
--- a/tests/system-test/2-query/ltrim.py
+++ b/tests/system-test/2-query/ltrim.py
@@ -23,6 +23,7 @@ CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
+DBNAME = "db"
class TDTestCase:
@@ -120,16 +121,16 @@ class TDTestCase:
return sqls
- def __test_current(self): # sourcery skip: use-itertools-product
+ def __test_current(self, dbname=DBNAME): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__ltrim_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname=DBNAME):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__ltrim_err_check(tb):
@@ -142,17 +143,16 @@ class TDTestCase:
self.__test_error()
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname=DBNAME):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -162,29 +162,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname=DBNAME):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -200,7 +200,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -216,13 +216,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -251,8 +251,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/mavg.py b/tests/system-test/2-query/mavg.py
index 0995dfc6ff..b52217af9a 100644
--- a/tests/system-test/2-query/mavg.py
+++ b/tests/system-test/2-query/mavg.py
@@ -307,7 +307,7 @@ class TDTestCase:
pass
- def mavg_current_query(self) :
+ def mavg_current_query(self, dbname="db") :
# table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
# c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
@@ -325,17 +325,17 @@ class TDTestCase:
case6 = {"col": "c9"}
self.checkmavg(**case6)
- # # case7~8: nested query
- # case7 = {"table_expr": f"(select c1 from {dbname}.stb1)"}
- # self.checkmavg(**case7)
- # case8 = {"table_expr": f"(select mavg(c1, 1) c1 from {dbname}.stb1 group by tbname)"}
+ # case7~8: nested query
+ case7 = {"table_expr": f"(select c1 from {dbname}.stb1)"}
+ self.checkmavg(**case7)
+ # case8 = {"table_expr": f"(select _c0, mavg(c1, 1) c1 from {dbname}.stb1 group by tbname)"}
# self.checkmavg(**case8)
# case9~10: mix with tbname/ts/tag/col
- # case9 = {"alias": ", tbname"}
- # self.checkmavg(**case9)
- # case10 = {"alias": ", _c0"}
- # self.checkmavg(**case10)
+ case9 = {"alias": ", tbname"}
+ self.checkmavg(**case9)
+ case10 = {"alias": ", _c0"}
+ self.checkmavg(**case10)
# case11 = {"alias": ", st1"}
# self.checkmavg(**case11)
# case12 = {"alias": ", c1"}
@@ -356,7 +356,7 @@ class TDTestCase:
# case17: only support normal table join
case17 = {
"col": "t1.c1",
- "table_expr": "t1, t2",
+ "table_expr": f"{dbname}.t1 t1, {dbname}.t2 t2",
"condition": "where t1.ts=t2.ts"
}
self.checkmavg(**case17)
@@ -367,14 +367,14 @@ class TDTestCase:
# }
# self.checkmavg(**case19)
- # case20~21: with order by
+ # # case20~21: with order by
# case20 = {"condition": "order by ts"}
# self.checkmavg(**case20)
- #case21 = {
- # "table_expr": f"{dbname}.stb1",
- # "condition": "group by tbname order by tbname"
- #}
- #self.checkmavg(**case21)
+ case21 = {
+ "table_expr": f"{dbname}.stb1",
+ "condition": "group by tbname order by tbname"
+ }
+ self.checkmavg(**case21)
# # case22: with union
# case22 = {
@@ -398,7 +398,7 @@ class TDTestCase:
pass
- def mavg_error_query(self) -> None :
+ def mavg_error_query(self, dbname="db") -> None :
# unusual test
# form test
@@ -419,9 +419,9 @@ class TDTestCase:
err8 = {"table_expr": ""}
self.checkmavg(**err8) # no table_expr
- # err9 = {"col": "st1"}
+ err9 = {"col": "st1"}
# self.checkmavg(**err9) # col: tag
- # err10 = {"col": 1}
+ err10 = {"col": 1}
# self.checkmavg(**err10) # col: value
err11 = {"col": "NULL"}
self.checkmavg(**err11) # col: NULL
@@ -496,7 +496,7 @@ class TDTestCase:
# "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
# }
# self.checkmavg(**err44) # stb join
- tdSql.query("select mavg( stb1.c1 , 1 ) from stb1, stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts;")
+ tdSql.query(f"select mavg( stb1.c1 , 1 ) from {dbname}.stb1 stb1, {dbname}.stb2 stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts;")
err45 = {
"condition": "where ts>0 and ts < now interval(1h) fill(next)"
}
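mavg.py drives `checkmavg(**case)` with small keyword dicts like `case17` and `case21` above. A hypothetical reconstruction of how such a dict could expand into SQL (the real `checkmavg` also runs the query and validates its rows or the expected error; the builder below is illustrative only):

    def build_mavg_sql(col="c1", k=1, alias="", table_expr="db.stb1", condition=""):
        # Hypothetical expansion of the case dicts used in this file.
        return f"select mavg({col}, {k}){alias} from {table_expr} {condition};"

    # case21 from the hunk above:
    # build_mavg_sql(table_expr="db.stb1", condition="group by tbname order by tbname")
    # -> "select mavg(c1, 1) from db.stb1 group by tbname order by tbname;"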
diff --git a/tests/system-test/2-query/max.py b/tests/system-test/2-query/max.py
index 34442a3725..169b1c2c38 100644
--- a/tests/system-test/2-query/max.py
+++ b/tests/system-test/2-query/max.py
@@ -5,10 +5,7 @@ import numpy as np
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
- "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
+ updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
@@ -17,60 +14,80 @@ class TDTestCase:
self.ts = 1537146000000
self.binary_str = 'taosdata'
self.nchar_str = '涛思数据'
- def max_check_stb_and_tb_base(self):
+ def max_check_stb_and_tb_base(self, dbname="db"):
tdSql.prepare()
intData = []
floatData = []
- tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
+ tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
- tdSql.execute("create table stb_1 using stb tags('beijing')")
+ tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')")
for i in range(self.rowNum):
- tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
+ tdSql.execute(f"insert into {dbname}.stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
- for i in ['ts','col11','col12','col13']:
- for j in ['db.stb','stb','db.stb_1','stb_1']:
- tdSql.error(f'select max({i} from {j} )')
+ for i in ['col11','col12','col13']:
+ for j in ['stb','stb_1']:
+ tdSql.error(f'select max({i} from {dbname}.{j} )')
for i in range(1,11):
- for j in ['db.stb','stb','db.stb_1','stb_1']:
- tdSql.query(f"select max(col{i}) from {j}")
+ for j in ['stb', 'stb_1']:
+ tdSql.query(f"select max(col{i}) from {dbname}.{j}")
if i<9:
tdSql.checkData(0, 0, np.max(intData))
elif i>=9:
tdSql.checkData(0, 0, np.max(floatData))
- tdSql.query("select max(col1) from stb_1 where col2<=5")
- tdSql.checkData(0,0,5)
- tdSql.query("select max(col1) from stb where col2<=5")
- tdSql.checkData(0,0,5)
- tdSql.execute('drop database db')
- def max_check_ntb_base(self):
+ tdSql.query(f"select max(now()) from {dbname}.stb_1")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select last(ts) from {dbname}.stb_1")
+ lastTs = tdSql.getData(0, 0)
+ tdSql.query(f"select max(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, lastTs)
+
+ tdSql.query(f"select last(ts) from {dbname}.stb")
+ lastTs = tdSql.getData(0, 0)
+ tdSql.query(f"select max(ts) from {dbname}.stb")
+ tdSql.checkData(0, 0, lastTs)
+
+ tdSql.query(f"select max(col1) from {dbname}.stb_1 where col2<=5")
+ tdSql.checkData(0,0,5)
+ tdSql.query(f"select max(col1) from {dbname}.stb where col2<=5")
+ tdSql.checkData(0,0,5)
+
+ def max_check_ntb_base(self, dbname="db"):
tdSql.prepare()
intData = []
floatData = []
- tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
+ tdSql.execute(f'''create table {dbname}.ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20))''')
for i in range(self.rowNum):
- tdSql.execute(f"insert into ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
+ tdSql.execute(f"insert into {dbname}.ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
- for i in ['ts','col11','col12','col13']:
- for j in ['db.ntb','ntb']:
- tdSql.error(f'select max({i} from {j} )')
+ for i in ['col11','col12','col13']:
+ for j in ['ntb']:
+ tdSql.error(f'select max({i} from {dbname}.{j} )')
for i in range(1,11):
- for j in ['db.ntb','ntb']:
- tdSql.query(f"select max(col{i}) from {j}")
+ for j in ['ntb']:
+ tdSql.query(f"select max(col{i}) from {dbname}.{j}")
if i<9:
tdSql.checkData(0, 0, np.max(intData))
elif i>=9:
tdSql.checkData(0, 0, np.max(floatData))
- tdSql.query("select max(col1) from ntb where col2<=5")
- tdSql.checkData(0,0,5)
- tdSql.execute('drop database db')
+ tdSql.query(f"select max(now()) from {dbname}.ntb")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select last(ts) from {dbname}.ntb")
+ lastTs = tdSql.getData(0, 0)
+ tdSql.query(f"select max(ts) from {dbname}.ntb")
+ tdSql.checkData(0, 0, lastTs)
+
+ tdSql.query(f"select max(col1) from {dbname}.ntb where col2<=5")
+ tdSql.checkData(0,0,5)
def check_max_functions(self, tbname , col_name):
@@ -90,55 +107,55 @@ class TDTestCase:
tdLog.info(" max function work as expected, sql : %s "% max_sql)
- def support_distributed_aggregate(self):
+ def support_distributed_aggregate(self, dbname="testdb"):
# prepate datas for 20 tables distributed at different vgroups
- tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
- tdSql.execute(" use testdb ")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
+ tdSql.execute(f"use {dbname} ")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(20):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
- tbname = "ct"+f'{i}'
+ tbname = f"{dbname}.ct{i}"
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -157,7 +174,7 @@ class TDTestCase:
tdLog.info(" prepare data for distributed_aggregate done! ")
# get vgroup_ids of all
- tdSql.query("show vgroups ")
+ tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
@@ -167,7 +184,7 @@ class TDTestCase:
# check sub_table of per vnode ,make sure sub_table has been distributed
- tdSql.query("select * from information_schema.ins_tables where db_name = 'testdb' and table_name like 'ct%'")
+ tdSql.query(f"select * from information_schema.ins_tables where db_name = '{dbname}' and table_name like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
@@ -182,13 +199,13 @@ class TDTestCase:
# check max function work status
- tdSql.query("show tables like 'ct%'")
+ tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
tablenames.append(table_name[0])
- tdSql.query("desc stb1")
+ tdSql.query(f"desc {dbname}.stb1")
col_names = tdSql.queryResult
colnames = []
@@ -198,11 +215,7 @@ class TDTestCase:
for tablename in tablenames:
for colname in colnames:
- self.check_max_functions(tablename,colname)
-
- # max function with basic filter
- print(vnode_tables)
-
+ self.check_max_functions(f"{dbname}.{tablename}", colname)
def run(self):
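max.py replaces the blanket errors on `max(ts)` and `max(now())` with positive checks: timestamps are now legal MAX/MIN arguments. The new assertion pattern, restated compactly with the same `tdSql` calls the hunks above use (the table name is illustrative):

    from util.sql import tdSql   # TDengine test framework helper

    def check_max_ts_equals_last(tbl="db.stb_1"):
        tdSql.query(f"select last(ts) from {tbl}")
        last_ts = tdSql.getData(0, 0)
        tdSql.query(f"select max(ts) from {tbl}")
        tdSql.checkData(0, 0, last_ts)   # the max timestamp is the newest row's ts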
diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py
index 4b9996d9c3..01c2677242 100644
--- a/tests/system-test/2-query/max_partition.py
+++ b/tests/system-test/2-query/max_partition.py
@@ -12,16 +12,15 @@ class TDTestCase:
self.tb_nums = 10
self.ts = 1537146000000
- def prepare_datas(self, stb_name , tb_nums , row_nums ):
- tdSql.execute(" use db ")
- tdSql.execute(f" create stable {stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\
+ def prepare_datas(self, stb_name , tb_nums , row_nums, dbname="db" ):
+ tdSql.execute(f" create stable {dbname}.{stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\
uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\
, t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ")
for i in range(tb_nums):
- tbname = f"sub_{stb_name}_{i}"
+ tbname = f"{dbname}.sub_{stb_name}_{i}"
ts = self.ts + i*10000
- tdSql.execute(f"create table {tbname} using {stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )")
+ tdSql.execute(f"create table {tbname} using {dbname}.{stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )")
for row in range(row_nums):
ts = self.ts + row*1000
@@ -31,191 +30,192 @@ class TDTestCase:
ts = self.ts + row_nums*1000 + null*1000
tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )")
- def basic_query(self):
- tdSql.query("select count(*) from stb")
+ def basic_query(self, dbname="db"):
+ tdSql.query(f"select count(*) from {dbname}.stb")
tdSql.checkData(0,0,(self.row_nums + 5 )*self.tb_nums)
- tdSql.query("select max(c1) from stb")
+ tdSql.query(f"select max(c1) from {dbname}.stb")
tdSql.checkData(0,0,(self.row_nums -1))
- tdSql.query(" select tbname , max(c1) from stb partition by tbname ")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname ")
tdSql.checkRows(self.tb_nums)
- tdSql.query(" select max(c1) from stb group by t1 order by t1 ")
+ tdSql.query(f"select max(c1) from {dbname}.stb group by t1 order by t1 ")
tdSql.checkRows(self.tb_nums)
- tdSql.query(" select max(c1) from stb group by c1 order by t1 ")
- tdSql.query(" select max(t2) from stb group by c1 order by t1 ")
- tdSql.query(" select max(c1) from stb group by tbname order by tbname ")
+ tdSql.query(f"select max(c1) from {dbname}.stb group by c1 order by t1 ")
+ tdSql.query(f"select max(t2) from {dbname}.stb group by c1 order by t1 ")
+ tdSql.query(f"select max(c1) from {dbname}.stb group by tbname order by tbname ")
tdSql.checkRows(self.tb_nums)
# bug need fix
- tdSql.query(" select max(t2) from stb group by t2 order by t2 ")
+ tdSql.query(f"select max(t2) from {dbname}.stb group by t2 order by t2 ")
tdSql.checkRows(self.tb_nums)
- tdSql.query(" select max(c1) from stb group by c1 order by c1 ")
+ tdSql.query(f"select max(c1) from {dbname}.stb group by c1 order by c1 ")
tdSql.checkRows(self.row_nums+1)
- tdSql.query(" select c1 , max(c1) from stb group by c1 order by c1 ")
+ tdSql.query(f"select c1 , max(c1) from {dbname}.stb group by c1 order by c1 ")
tdSql.checkRows(self.row_nums+1)
# support selective functions
- tdSql.query(" select c1 ,c2 ,c3 , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ")
+ tdSql.query(f"select c1 ,c2 ,c3 , max(c1) ,c4 ,c5 ,t11 from {dbname}.stb group by c1 order by c1 desc ")
tdSql.checkRows(self.row_nums+1)
- tdSql.query(" select c1, tbname , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ")
+ tdSql.query(f"select c1, tbname , max(c1) ,c4 ,c5 ,t11 from {dbname}.stb group by c1 order by c1 desc ")
tdSql.checkRows(self.row_nums+1)
# bug need fix
- # tdSql.query(" select tbname , max(c1) from sub_stb_1 where c1 is null group by c1 order by c1 desc ")
- # tdSql.checkRows(1)
- # tdSql.checkData(0,0,"sub_stb_1")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.sub_stb_1 where c1 is null group by c1 order by c1 desc ")
+ tdSql.checkRows(1)
+ tdSql.checkData(0,0,"sub_stb_1")
- tdSql.query("select max(c1) ,c2 ,t2,tbname from stb group by abs(c1) order by abs(c1)")
+ tdSql.query(f"select max(c1) ,c2 ,t2,tbname from {dbname}.stb group by abs(c1) order by abs(c1)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select abs(c1+c3), count(c1+c3) ,max(c1+t2) from stb group by abs(c1+c3) order by abs(c1+c3)")
+ tdSql.query(f"select abs(c1+c3), count(c1+c3) ,max(c1+t2) from {dbname}.stb group by abs(c1+c3) order by abs(c1+c3)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select max(c1+c3)+min(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)")
+ tdSql.query(f"select max(c1+c3)+min(c2) ,abs(c1) from {dbname}.stb group by abs(c1) order by abs(c1)")
tdSql.checkRows(self.row_nums+1)
- tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from stb group by abs(c1) order by abs(t1)+c2")
- tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)+c2")
- tdSql.query("select abs(c1+c3)+abs(c2) , count(c1+c3)+max(c2) from stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)")
+ tdSql.error(f"select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from {dbname}.stb group by abs(c1) order by abs(t1)+c2")
+ tdSql.error(f"select count(c1+c3)+max(c2) ,abs(c1) from {dbname}.stb group by abs(c1) order by abs(c1)+c2")
+ tdSql.query(f"select abs(c1+c3)+abs(c2) , count(c1+c3)+max(c2) from {dbname}.stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query(" select max(c1) , max(t2) from stb where abs(c1+t2)=1 partition by tbname ")
+ tdSql.query(f"select max(c1) , max(t2) from {dbname}.stb where abs(c1+t2)=1 partition by tbname ")
tdSql.checkRows(2)
- tdSql.query(" select max(c1) from stb where abs(c1+t2)=1 partition by tbname ")
+ tdSql.query(f"select max(c1) from {dbname}.stb where abs(c1+t2)=1 partition by tbname ")
tdSql.checkRows(2)
- tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname ")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,1,self.row_nums-1)
- tdSql.query("select tbname , max(c2) from stb partition by t1 order by t1")
- tdSql.query("select tbname , max(t2) from stb partition by t1 order by t1")
- tdSql.query("select tbname , max(t2) from stb partition by t2 order by t2")
+ tdSql.query(f"select tbname , max(c2) from {dbname}.stb partition by t1 order by t1")
+ tdSql.query(f"select tbname , max(t2) from {dbname}.stb partition by t1 order by t1")
+ tdSql.query(f"select tbname , max(t2) from {dbname}.stb partition by t2 order by t2")
# # bug need fix
- tdSql.query("select t2 , max(t2) from stb partition by t2 order by t2")
+ tdSql.query(f"select t2 , max(t2) from {dbname}.stb partition by t2 order by t2")
tdSql.checkRows(self.tb_nums)
- tdSql.query("select tbname , max(c1) from stb partition by tbname order by tbname")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,1,self.row_nums-1)
- tdSql.query("select tbname , max(c1) from stb partition by t2 order by t2")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by t2 order by t2")
- tdSql.query("select c2, max(c1) from stb partition by c2 order by c2 desc")
+ tdSql.query(f"select c2, max(c1) from {dbname}.stb partition by c2 order by c2 desc")
tdSql.checkRows(self.tb_nums+1)
tdSql.checkData(0,1,self.row_nums-1)
- tdSql.query("select tbname , max(c1) from stb partition by c1 order by c2")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by c1 order by c2")
- tdSql.query("select tbname , abs(t2) from stb partition by c2 order by t2")
+ tdSql.query(f"select tbname , abs(t2) from {dbname}.stb partition by c2 order by t2")
tdSql.checkRows(self.tb_nums*(self.row_nums+5))
- tdSql.query("select max(c1) , count(t2) from stb partition by c2 ")
+ tdSql.query(f"select max(c1) , count(t2) from {dbname}.stb partition by c2 ")
tdSql.checkRows(self.row_nums+1)
tdSql.checkData(0,1,self.row_nums)
- tdSql.query("select count(c1) , max(t2) ,c2 from stb partition by c2 order by c2")
+ tdSql.query(f"select count(c1) , max(t2) ,c2 from {dbname}.stb partition by c2 order by c2")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select count(c1) , count(t1) ,max(c2) ,tbname from stb partition by tbname order by tbname")
+ tdSql.query(f"select count(c1) , count(t1) ,max(c2) ,tbname from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
tdSql.checkCols(4)
- tdSql.query("select count(c1) , max(t2) ,t1 from stb partition by t1 order by t1")
+ tdSql.query(f"select count(c1) , max(t2) ,t1 from {dbname}.stb partition by t1 order by t1")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,self.row_nums)
# bug need fix
- tdSql.query("select count(c1) , max(t2) ,abs(c1) from stb partition by abs(c1) order by abs(c1)")
+ tdSql.query(f"select count(c1) , max(t2) ,abs(c1) from {dbname}.stb partition by abs(c1) order by abs(c1)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from stb partition by abs(c2) order by abs(c2)")
+ tdSql.query(f"select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from {dbname}.stb partition by abs(c2) order by abs(c2)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select max(ceil(c1-2)) , max(floor(t2+1)) ,max(c2-c1) from stb partition by abs(floor(c1)) order by abs(floor(c1))")
+ tdSql.query(f"select max(ceil(c1-2)) , max(floor(t2+1)) ,max(c2-c1) from {dbname}.stb partition by abs(floor(c1)) order by abs(floor(c1))")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select tbname , max(c1) ,c1 from stb partition by tbname order by tbname")
+ tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,'sub_stb_0')
tdSql.checkData(0,1,9)
tdSql.checkData(0,2,9)
- tdSql.query("select tbname ,top(c1,1) ,c1 from stb partition by tbname order by tbname")
+ tdSql.query(f"select tbname ,top(c1,1) ,c1 from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
- tdSql.query(" select c1 , sample(c1,2) from stb partition by tbname order by tbname ")
+ tdSql.query(f"select c1 , sample(c1,2) from {dbname}.stb partition by tbname order by tbname ")
tdSql.checkRows(self.tb_nums*2)
# interval
- tdSql.query("select max(c1) from stb interval(2s) sliding(1s)")
+ tdSql.query(f"select max(c1) from {dbname}.stb interval(2s) sliding(1s)")
# bug need fix
- tdSql.query('select max(c1) from stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)')
+ tdSql.query(f'select max(c1) from {dbname}.stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)')
- tdSql.query(" select tbname , count(c1) from stb partition by tbname interval(10s) slimit 5 soffset 1 ")
+ tdSql.query(f"select tbname , count(c1) from {dbname}.stb partition by tbname interval(10s) slimit 5 soffset 1 ")
- tdSql.query("select tbname , max(c1) from stb partition by tbname interval(10s)")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname interval(10s)")
tdSql.checkRows(self.row_nums*2)
- tdSql.query("select unique(c1) from stb partition by tbname order by tbname")
+ tdSql.query(f"select unique(c1) from {dbname}.stb partition by tbname order by tbname")
- tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)")
+ tdSql.query(f"select tbname , count(c1) from {dbname}.sub_stb_1 partition by tbname interval(10s)")
tdSql.checkData(0,0,'sub_stb_1')
tdSql.checkData(0,1,self.row_nums)
- tdSql.query("select c1 , mavg(c1 ,2 ) from stb partition by c1")
+ tdSql.query(f"select c1 , mavg(c1 ,2 ) from {dbname}.stb partition by c1")
tdSql.checkRows(90)
- tdSql.query("select c1 , diff(c1 , 0) from stb partition by c1")
+ tdSql.query(f"select c1 , diff(c1 , 0) from {dbname}.stb partition by c1")
tdSql.checkRows(90)
- tdSql.query("select c1 , csum(c1) from stb partition by c1")
+ tdSql.query(f"select c1 , csum(c1) from {dbname}.stb partition by c1")
tdSql.checkRows(100)
- tdSql.query("select c1 , sample(c1,2) from stb partition by c1 order by c1")
+ tdSql.query(f"select c1 , sample(c1,2) from {dbname}.stb partition by c1 order by c1")
tdSql.checkRows(21)
# bug need fix
- # tdSql.checkData(0,1,None)
+ tdSql.checkData(0,1,None)
- tdSql.query("select c1 , twa(c1) from stb partition by c1 order by c1")
+ tdSql.query(f"select c1 , twa(c1) from {dbname}.stb partition by c1 order by c1")
tdSql.checkRows(11)
tdSql.checkData(0,1,None)
- tdSql.query("select c1 , irate(c1) from stb partition by c1 order by c1")
+ tdSql.query(f"select c1 , irate(c1) from {dbname}.stb partition by c1 order by c1")
tdSql.checkRows(11)
tdSql.checkData(0,1,None)
- tdSql.query("select c1 , DERIVATIVE(c1,2,1) from stb partition by c1 order by c1")
+ tdSql.query(f"select c1 , DERIVATIVE(c1,2,1) from {dbname}.stb partition by c1 order by c1")
tdSql.checkRows(90)
# bug need fix
tdSql.checkData(0,1,None)
- tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname slimit 5 soffset 0 ")
tdSql.checkRows(10)
- tdSql.query(" select tbname , max(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.sub_stb_1 partition by tbname interval(10s) sliding(5s) ")
- tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
- tdSql.query(f'select tbname , max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
+ tdSql.query(f'select max(c1) from {dbname}.stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
+ tdSql.query(f'select tbname , max(c1) from {dbname}.stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
def run(self):
+ dbname = "db"
tdSql.prepare()
self.prepare_datas("stb",self.tb_nums,self.row_nums)
self.basic_query()
# # coverage case for taosd crash about bug fix
- tdSql.query(" select sum(c1) from stb where t2+10 >1 ")
- tdSql.query(" select count(c1),count(t1) from stb where -t2<1 ")
- tdSql.query(" select tbname ,max(ceil(c1)) from stb group by tbname ")
- tdSql.query(" select avg(abs(c1)) , tbname from stb group by tbname ")
- tdSql.query(" select t1,c1 from stb where abs(t2+c1)=1 ")
+ tdSql.query(f"select sum(c1) from {dbname}.stb where t2+10 >1 ")
+ tdSql.query(f"select count(c1),count(t1) from {dbname}.stb where -t2<1 ")
+ tdSql.query(f"select tbname ,max(ceil(c1)) from {dbname}.stb group by tbname ")
+ tdSql.query(f"select avg(abs(c1)) , tbname from {dbname}.stb group by tbname ")
+ tdSql.query(f"select t1,c1 from {dbname}.stb where abs(t2+c1)=1 ")
def stop(self):
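The row-count assertions in max_partition.py all fall out of `prepare_datas`: each of the `tb_nums` subtables gets `row_nums` data rows plus 5 all-NULL rows, with c1 cycling over 0..row_nums-1. The arithmetic behind the checks above, spelled out under those assumptions:

    tb_nums, row_nums = 10, 10
    assert (row_nums + 5) * tb_nums == 150   # count(*) over the whole stable
    assert row_nums - 1 == 9                 # max(c1), since c1 runs 0..9
    assert row_nums + 1 == 11                # "group by c1" rows: 0..9 plus the NULL group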
diff --git a/tests/system-test/2-query/min.py b/tests/system-test/2-query/min.py
index c27e9926ff..3d46b7b222 100644
--- a/tests/system-test/2-query/min.py
+++ b/tests/system-test/2-query/min.py
@@ -14,198 +14,159 @@ class TDTestCase:
self.ts = 1537146000000
def run(self):
+ dbname = "db"
tdSql.prepare()
intData = []
floatData = []
- tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
- tdSql.execute("create table stb_1 using stb tags('beijing')")
- tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')")
+ tdSql.execute(f'''create table {dbname}.ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
for i in range(self.rowNum):
- tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ tdSql.execute(f"insert into {dbname}.ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
for i in range(self.rowNum):
- tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ tdSql.execute(f"insert into {dbname}.stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
# max verifacation
- tdSql.error("select min(ts) from stb_1")
- tdSql.error("select min(ts) from db.stb_1")
- tdSql.error("select min(col7) from stb_1")
- tdSql.error("select min(col7) from db.stb_1")
- tdSql.error("select min(col8) from stb_1")
- tdSql.error("select min(col8) from db.stb_1")
- tdSql.error("select min(col9) from stb_1")
- tdSql.error("select min(col9) from db.stb_1")
- # tdSql.error("select min(a) from stb_1")
- # tdSql.error("select min(1) from stb_1")
- tdSql.error("select min(now()) from stb_1")
- tdSql.error("select min(count(c1),count(c2)) from stb_1")
+ tdSql.error(f"select min(col7) from {dbname}.stb_1")
+ tdSql.error(f"select min(col8) from {dbname}.stb_1")
+ tdSql.error(f"select min(col9) from {dbname}.stb_1")
+ tdSql.error(f"select min(a) from {dbname}.stb_1")
+ tdSql.query(f"select min(1) from {dbname}.stb_1")
+ tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.stb_1")
- tdSql.query("select min(col1) from stb_1")
+ tdSql.query(f"select min(col1) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col1) from db.stb_1")
+ tdSql.query(f"select min(col2) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from stb_1")
+ tdSql.query(f"select min(col3) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from db.stb_1")
+ tdSql.query(f"select min(col4) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from stb_1")
+ tdSql.query(f"select min(col11) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from db.stb_1")
+ tdSql.query(f"select min(col12) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from stb_1")
+ tdSql.query(f"select min(col13) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from db.stb_1")
+ tdSql.query(f"select min(col14) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from db.stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from db.stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from db.stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from db.stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col5) from stb_1")
+ tdSql.query(f"select min(col5) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col5) from db.stb_1")
+ tdSql.query(f"select min(col6) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from stb_1")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from db.stb_1")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col1) from stb_1 where col2>=5")
+ tdSql.query(f"select min(col1) from {dbname}.stb_1 where col2>=5")
tdSql.checkData(0,0,5)
+ tdSql.query(f"select min(now()) from {dbname}.stb_1")
+ tdSql.checkRows(1)
- tdSql.error("select min(ts) from stb_1")
- tdSql.error("select min(ts) from db.stb_1")
- tdSql.error("select min(col7) from stb_1")
- tdSql.error("select min(col7) from db.stb_1")
- tdSql.error("select min(col8) from stb_1")
- tdSql.error("select min(col8) from db.stb_1")
- tdSql.error("select min(col9) from stb_1")
- tdSql.error("select min(col9) from db.stb_1")
- # tdSql.error("select min(a) from stb_1")
- # tdSql.error("select min(1) from stb_1")
- tdSql.error("select min(now()) from stb_1")
- tdSql.error("select min(count(c1),count(c2)) from stb_1")
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
- tdSql.query("select min(col1) from stb")
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
+
+
+ tdSql.error(f"select min(col7) from {dbname}.stb_1")
+ tdSql.error(f"select min(col8) from {dbname}.stb_1")
+ tdSql.error(f"select min(col9) from {dbname}.stb_1")
+ tdSql.error(f"select min(a) from {dbname}.stb_1")
+ tdSql.query(f"select min(1) from {dbname}.stb_1")
+ tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.stb_1")
+
+ tdSql.query(f"select min(col1) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col1) from db.stb")
+ tdSql.query(f"select min(col2) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from stb")
+ tdSql.query(f"select min(col3) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from db.stb")
+ tdSql.query(f"select min(col4) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from stb")
+ tdSql.query(f"select min(col11) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from db.stb")
+ tdSql.query(f"select min(col12) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from stb")
+ tdSql.query(f"select min(col13) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from db.stb")
+ tdSql.query(f"select min(col14) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from db.stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from db.stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from db.stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from db.stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col5) from stb")
+ tdSql.query(f"select min(col5) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col5) from db.stb")
+ tdSql.query(f"select min(col6) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from stb")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from db.stb")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col1) from stb where col2>=5")
+ tdSql.query(f"select min(col1) from {dbname}.stb where col2>=5")
tdSql.checkData(0,0,5)
+ tdSql.query(f"select min(now()) from {dbname}.stb_1")
+ tdSql.checkRows(1)
- tdSql.error("select min(ts) from ntb")
- tdSql.error("select min(ts) from db.ntb")
- tdSql.error("select min(col7) from ntb")
- tdSql.error("select min(col7) from db.ntb")
- tdSql.error("select min(col8) from ntb")
- tdSql.error("select min(col8) from db.ntb")
- tdSql.error("select min(col9) from ntb")
- tdSql.error("select min(col9) from db.ntb")
- # tdSql.error("select min(a) from stb_1")
- # tdSql.error("select min(1) from stb_1")
- tdSql.error("select min(now()) from ntb")
- tdSql.error("select min(count(c1),count(c2)) from ntb")
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
- tdSql.query("select min(col1) from ntb")
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
+
+ tdSql.error(f"select min(col7) from {dbname}.ntb")
+ tdSql.error(f"select min(col8) from {dbname}.ntb")
+ tdSql.error(f"select min(col9) from {dbname}.ntb")
+ tdSql.error(f"select min(a) from {dbname}.ntb")
+ tdSql.query(f"select min(1) from {dbname}.ntb")
+ tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.ntb")
+
+ tdSql.query(f"select min(col1) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col1) from db.ntb")
+ tdSql.query(f"select min(col2) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from ntb")
+ tdSql.query(f"select min(col3) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from db.ntb")
+ tdSql.query(f"select min(col4) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from ntb")
+ tdSql.query(f"select min(col11) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from db.ntb")
+ tdSql.query(f"select min(col12) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from ntb")
+ tdSql.query(f"select min(col13) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from db.ntb")
+ tdSql.query(f"select min(col14) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from db.ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from db.ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from db.ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from db.ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col5) from ntb")
+ tdSql.query(f"select min(col5) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col5) from db.ntb")
+ tdSql.query(f"select min(col6) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from ntb")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from db.ntb")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col1) from ntb where col2>=5")
+ tdSql.query(f"select min(col1) from {dbname}.ntb where col2>=5")
tdSql.checkData(0,0,5)
+ tdSql.query(f"select min(now()) from {dbname}.stb_1")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
+
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
+
def stop(self):
tdSql.close()
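Two things happen in the min.py hunks above: every table reference is routed through an f-string `dbname` parameter instead of a hard-coded `db.` prefix, and `min(ts)`/`min(now())` flip from expected errors to positive checks, with `min(ts)` validated against `first(ts)`. A minimal sketch of that assertion pattern, assuming only the `tdSql` helpers already used above (`query`, `getData`, `checkData`):

    # Sketch: min() now accepts timestamp columns, so min(ts) should equal
    # first(ts), the earliest timestamp stored in the table.
    def check_min_ts_equals_first_ts(tdSql, dbname="db", table="stb_1"):
        tdSql.query(f"select first(ts) from {dbname}.{table}")
        first_ts = tdSql.getData(0, 0)        # timestamp of the earliest row
        tdSql.query(f"select min(ts) from {dbname}.{table}")
        tdSql.checkData(0, 0, first_ts)       # min(ts) must match first(ts)

Wrapping the four lines in a helper like this would also remove the verbatim repetition of the block in the hunks above.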
diff --git a/tests/system-test/2-query/nestedQuery_str.py b/tests/system-test/2-query/nestedQuery_str.py
index 0d40ef8147..931ff873dc 100755
--- a/tests/system-test/2-query/nestedQuery_str.py
+++ b/tests/system-test/2-query/nestedQuery_str.py
@@ -24,9 +24,6 @@ from util.dnodes import tdDnodes
from util.dnodes import *
class TDTestCase:
- updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
diff --git a/tests/system-test/2-query/pow.py b/tests/system-test/2-query/pow.py
index 1af8bd3839..0702d05c0b 100644
--- a/tests/system-test/2-query/pow.py
+++ b/tests/system-test/2-query/pow.py
@@ -9,48 +9,46 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
-
- def prepare_datas(self):
+
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
-
+
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -65,257 +63,182 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
-
- def check_result_auto_pow2(self ,origin_query , pow_query):
+ def check_result_auto_pow(self ,base , origin_query , pow_query):
pow_result = tdSql.getResult(pow_query)
origin_result = tdSql.getResult(origin_query)
auto_result =[]
-
- for row in origin_result:
- row_check = []
- for elem in row:
- if elem == None:
- elem = None
- else:
- elem = math.pow(elem,2)
- row_check.append(elem)
- auto_result.append(row_check)
- check_status = True
-
- for row_index , row in enumerate(pow_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def check_result_auto_pow1(self ,origin_query , pow_query):
- pow_result = tdSql.getResult(pow_query)
- origin_result = tdSql.getResult(origin_query)
-
- auto_result =[]
-
for row in origin_result:
row_check = []
for elem in row:
if elem == None:
elem = None
else :
- elem = pow(elem ,1)
+ elem = float(pow(elem ,base))
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query )
+ tdSql.checkData(row_index,col_index ,auto_result[row_index][col_index])
+
- def check_result_auto_pow__10(self ,origin_query , pow_query):
- pow_result = tdSql.getResult(pow_query)
- origin_result = tdSql.getResult(origin_query)
-
- auto_result =[]
-
- for row in origin_result:
- row_check = []
- for elem in row:
- if elem == None:
- elem = None
- elif elem == 0:
- elem = None
- else:
- elem = pow(elem ,-10)
- row_check.append(elem)
- auto_result.append(row_check)
-
- check_status = True
- for row_index , row in enumerate(pow_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select pow from t1",
- # "select pow(-+--+c1 ,2) from t1",
- # "select +-pow(c1,2) from t1",
- # "select ++-pow(c1,2) from t1",
- # "select ++--pow(c1,2) from t1",
- # "select - -pow(c1,2)*0 from t1",
- # "select pow(tbname+1,2) from t1 ",
- "select pow(123--123,2)==1 from t1",
- "select pow(c1,2) as 'd1' from t1",
- "select pow(c1 ,c2 ,2) from t1",
- "select pow(c1 ,NULL ,2) from t1",
- "select pow(, 2) from t1;",
- "select pow(pow(c1, 2) ab from t1)",
- "select pow(c1 ,2 ) as int from t1",
- "select pow from stb1",
- # "select pow(-+--+c1) from stb1",
- # "select +-pow(c1) from stb1",
- # "select ++-pow(c1) from stb1",
- # "select ++--pow(c1) from stb1",
- # "select - -pow(c1)*0 from stb1",
- # "select pow(tbname+1) from stb1 ",
- "select pow(123--123 ,2)==1 from stb1",
- "select pow(c1 ,2) as 'd1' from stb1",
- "select pow(c1 ,c2 ,2 ) from stb1",
- "select pow(c1 ,NULL,2) from stb1",
- "select pow(,) from stb1;",
- "select pow(pow(c1 , 2) ab from stb1)",
- "select pow(c1 , 2) as int from stb1"
+ f"select pow from {dbname}.t1",
+ # f"select pow(-+--+c1 ,2) from {dbname}.t1",
+ # f"select +-pow(c1,2) from {dbname}.t1",
+ # f"select ++-pow(c1,2) from {dbname}.t1",
+ # f"select ++--pow(c1,2) from {dbname}.t1",
+ # f"select - -pow(c1,2)*0 from {dbname}.t1",
+ # f"select pow(tbname+1,2) from {dbname}.t1 ",
+ f"select pow(123--123,2)==1 from {dbname}.t1",
+ f"select pow(c1,2) as 'd1' from {dbname}.t1",
+ f"select pow(c1 ,c2 ,2) from {dbname}.t1",
+ f"select pow(c1 ,NULL ,2) from {dbname}.t1",
+ f"select pow(, 2) from {dbname}.t1;",
+ f"select pow(pow(c1, 2) ab from {dbname}.t1)",
+ f"select pow(c1 ,2 ) as int from {dbname}.t1",
+ f"select pow from {dbname}.stb1",
+ # f"select pow(-+--+c1) from {dbname}.stb1",
+ # f"select +-pow(c1) from {dbname}.stb1",
+ # f"select ++-pow(c1) from {dbname}.stb1",
+ # f"select ++--pow(c1) from {dbname}.stb1",
+ # f"select - -pow(c1)*0 from {dbname}.stb1",
+ # f"select pow(tbname+1) from {dbname}.stb1 ",
+ f"select pow(123--123 ,2)==1 from {dbname}.stb1",
+ f"select pow(c1 ,2) as 'd1' from {dbname}.stb1",
+ f"select pow(c1 ,c2 ,2 ) from {dbname}.stb1",
+ f"select pow(c1 ,NULL,2) from {dbname}.stb1",
+ f"select pow(,) from {dbname}.stb1;",
+ f"select pow(pow(c1 , 2) ab from {dbname}.stb1)",
+ f"select pow(c1 , 2) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
-
- def support_types(self):
+
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select pow(ts ,2 ) from t1" ,
- "select pow(c7,c1 ) from t1",
- "select pow(c8,c2) from t1",
- "select pow(c9,c3 ) from t1",
- "select pow(ts,c4 ) from ct1" ,
- "select pow(c7,c5 ) from ct1",
- "select pow(c8,c6 ) from ct1",
- "select pow(c9,c8 ) from ct1",
- "select pow(ts,2 ) from ct3" ,
- "select pow(c7,2 ) from ct3",
- "select pow(c8,2 ) from ct3",
- "select pow(c9,2 ) from ct3",
- "select pow(ts,2 ) from ct4" ,
- "select pow(c7,2 ) from ct4",
- "select pow(c8,2 ) from ct4",
- "select pow(c9,2 ) from ct4",
- "select pow(ts,2 ) from stb1" ,
- "select pow(c7,2 ) from stb1",
- "select pow(c8,2 ) from stb1",
- "select pow(c9,2 ) from stb1" ,
+ f"select pow(ts ,2 ) from {dbname}.t1" ,
+ f"select pow(c7,c1 ) from {dbname}.t1",
+ f"select pow(c8,c2) from {dbname}.t1",
+ f"select pow(c9,c3 ) from {dbname}.t1",
+ f"select pow(ts,c4 ) from {dbname}.ct1" ,
+ f"select pow(c7,c5 ) from {dbname}.ct1",
+ f"select pow(c8,c6 ) from {dbname}.ct1",
+ f"select pow(c9,c8 ) from {dbname}.ct1",
+ f"select pow(ts,2 ) from {dbname}.ct3" ,
+ f"select pow(c7,2 ) from {dbname}.ct3",
+ f"select pow(c8,2 ) from {dbname}.ct3",
+ f"select pow(c9,2 ) from {dbname}.ct3",
+ f"select pow(ts,2 ) from {dbname}.ct4" ,
+ f"select pow(c7,2 ) from {dbname}.ct4",
+ f"select pow(c8,2 ) from {dbname}.ct4",
+ f"select pow(c9,2 ) from {dbname}.ct4",
+ f"select pow(ts,2 ) from {dbname}.stb1" ,
+ f"select pow(c7,2 ) from {dbname}.stb1",
+ f"select pow(c8,2 ) from {dbname}.stb1",
+ f"select pow(c9,2 ) from {dbname}.stb1" ,
- "select pow(ts,2 ) from stbbb1" ,
- "select pow(c7,2 ) from stbbb1",
+ f"select pow(ts,2 ) from {dbname}.stbbb1" ,
+ f"select pow(c7,2 ) from {dbname}.stbbb1",
- "select pow(ts,2 ) from tbname",
- "select pow(c9,2 ) from tbname"
+ f"select pow(ts,2 ) from {dbname}.tbname",
+ f"select pow(c9,2 ) from {dbname}.tbname"
]
-
+
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
-
-
+
+
type_sql_lists = [
- "select pow(c1,2 ) from t1",
- "select pow(c2,2 ) from t1",
- "select pow(c3,2 ) from t1",
- "select pow(c4,2 ) from t1",
- "select pow(c5,2 ) from t1",
- "select pow(c6,2 ) from t1",
+ f"select pow(c1,2 ) from {dbname}.t1",
+ f"select pow(c2,2 ) from {dbname}.t1",
+ f"select pow(c3,2 ) from {dbname}.t1",
+ f"select pow(c4,2 ) from {dbname}.t1",
+ f"select pow(c5,2 ) from {dbname}.t1",
+ f"select pow(c6,2 ) from {dbname}.t1",
- "select pow(c1,2 ) from ct1",
- "select pow(c2,2 ) from ct1",
- "select pow(c3,2 ) from ct1",
- "select pow(c4,2 ) from ct1",
- "select pow(c5,2 ) from ct1",
- "select pow(c6,2 ) from ct1",
+ f"select pow(c1,2 ) from {dbname}.ct1",
+ f"select pow(c2,2 ) from {dbname}.ct1",
+ f"select pow(c3,2 ) from {dbname}.ct1",
+ f"select pow(c4,2 ) from {dbname}.ct1",
+ f"select pow(c5,2 ) from {dbname}.ct1",
+ f"select pow(c6,2 ) from {dbname}.ct1",
- "select pow(c1,2 ) from ct3",
- "select pow(c2,2 ) from ct3",
- "select pow(c3,2 ) from ct3",
- "select pow(c4,2 ) from ct3",
- "select pow(c5,2 ) from ct3",
- "select pow(c6,2 ) from ct3",
+ f"select pow(c1,2 ) from {dbname}.ct3",
+ f"select pow(c2,2 ) from {dbname}.ct3",
+ f"select pow(c3,2 ) from {dbname}.ct3",
+ f"select pow(c4,2 ) from {dbname}.ct3",
+ f"select pow(c5,2 ) from {dbname}.ct3",
+ f"select pow(c6,2 ) from {dbname}.ct3",
- "select pow(c1,2 ) from stb1",
- "select pow(c2,2 ) from stb1",
- "select pow(c3,2 ) from stb1",
- "select pow(c4,2 ) from stb1",
- "select pow(c5,2 ) from stb1",
- "select pow(c6,2 ) from stb1",
+ f"select pow(c1,2 ) from {dbname}.stb1",
+ f"select pow(c2,2 ) from {dbname}.stb1",
+ f"select pow(c3,2 ) from {dbname}.stb1",
+ f"select pow(c4,2 ) from {dbname}.stb1",
+ f"select pow(c5,2 ) from {dbname}.stb1",
+ f"select pow(c6,2 ) from {dbname}.stb1",
- "select pow(c6,2) as alisb from stb1",
- "select pow(c6,2) alisb from stb1",
+ f"select pow(c6,2) as alisb from {dbname}.stb1",
+ f"select pow(c6,2) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
-
- def basic_pow_function(self):
- # basic query
- tdSql.query("select c1 from ct3")
+ def basic_pow_function(self, dbname="db"):
+
+ # basic query
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select pow(c1 ,2) from ct3")
+ tdSql.query(f"select pow(c1 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c2 ,2) from ct3")
+ tdSql.query(f"select pow(c2 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c3 ,2) from ct3")
+ tdSql.query(f"select pow(c3 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c4 ,2) from ct3")
+ tdSql.query(f"select pow(c4 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c5 ,2) from ct3")
+ tdSql.query(f"select pow(c5 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c6 ,2) from ct3")
+ tdSql.query(f"select pow(c6 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select pow(c1 ,2) from t1")
+ tdSql.query(f"select pow(c1 ,2) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1.000000000)
tdSql.checkData(3 , 0, 9.000000000)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_pow2( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,2), pow(c2 ,2) ,pow(c3, 2), pow(c4 ,2), pow(c5 ,2) from t1")
- self.check_result_auto_pow1( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,1), pow(c2 ,1) ,pow(c3, 1), pow(c4 ,1), pow(c5 ,1) from t1")
- self.check_result_auto_pow__10( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,-10), pow(c2 ,-10) ,pow(c3, -10), pow(c4 ,-10), pow(c5 ,-10) from t1")
-
+ self.check_result_auto_pow( 2, f"select c1, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,2) , pow(c3, 2), pow(c4 ,2), pow(c5 ,2) from {dbname}.t1")
+ self.check_result_auto_pow( 1,f"select c1, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,1) , pow(c3, 1), pow(c4 ,1), pow(c5 ,1) from {dbname}.t1")
+ self.check_result_auto_pow( 10,f"select c1, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,10) ,pow(c3, 10), pow(c4 ,10), pow(c5 ,10) from {dbname}.t1")
+
# used for sub table
- tdSql.query("select c1 ,pow(c1 ,2) from ct1")
+ tdSql.query(f"select c1 ,pow(c1 ,2) from {dbname}.ct1")
tdSql.checkData(0, 1, 64.000000000)
tdSql.checkData(1 , 1, 49.000000000)
tdSql.checkData(3 , 1, 25.000000000)
@@ -323,7 +246,7 @@ class TDTestCase:
# # test bug fix for pow(c1,c2)
- tdSql.query("select c1, c5 ,pow(c1,c5) from ct4")
+ tdSql.query(f"select c1, c5 ,pow(c1,c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, 104577724.506799981)
tdSql.checkData(2 , 2, 3684781.623933245)
@@ -331,11 +254,11 @@ class TDTestCase:
tdSql.checkData(4 , 2, 7573.273783071)
- self.check_result_auto_pow2( "select c1, c2, c3 , c4, c5 from ct1", "select pow(c1,2), pow(c2,2) ,pow(c3,2), pow(c4,2), pow(c5,2) from ct1")
- self.check_result_auto_pow__10( "select c1, c2, c3 , c4, c5 from ct1", "select pow(c1,-10), pow(c2,-10) ,pow(c3,-10), pow(c4,-10), pow(c5,-10) from ct1")
+ self.check_result_auto_pow( 2, f"select c1, c3 , c4, c5 from {dbname}.ct1", f"select pow(c1,2), pow(c3,2), pow(c4,2), pow(c5,2) from {dbname}.ct1")
+ self.check_result_auto_pow( 10, f"select c1, c3 , c4, c5 from {dbname}.ct1", f"select pow(c1,10), pow(c3,10), pow(c4,10), pow(c5,10) from {dbname}.ct1")
# nest query for pow functions
- tdSql.query("select c1 , pow(c1,2) ,pow(pow(c1,2),2) , pow(pow(pow(c1,2),2),2) from ct1;")
+ tdSql.query(f"select c1 , pow(c1,2) ,pow(pow(c1,2),2) , pow(pow(pow(c1,2),2),2) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 8)
tdSql.checkData(0 , 1 , 64.000000000)
tdSql.checkData(0 , 2 , 4096.000000000)
@@ -351,24 +274,24 @@ class TDTestCase:
tdSql.checkData(4 , 2 , 0.000000000)
tdSql.checkData(4 , 3 , 0.000000000)
- # # used for stable table
-
- tdSql.query("select pow(c1, 2) from stb1")
+ # # used for stable table
+
+ tdSql.query(f"select pow(c1, 2) from {dbname}.stb1")
tdSql.checkRows(25)
-
+
        # used for a nonexistent table
- tdSql.error("select pow(c1, 2) from stbbb1")
- tdSql.error("select pow(c1, 2) from tbname")
- tdSql.error("select pow(c1, 2) from ct5")
+ tdSql.error(f"select pow(c1, 2) from {dbname}.stbbb1")
+ tdSql.error(f"select pow(c1, 2) from {dbname}.tbname")
+ tdSql.error(f"select pow(c1, 2) from {dbname}.ct5")
- # mix with common col
- tdSql.query("select c1, pow(c1 ,2) from ct1")
+ # mix with common col
+ tdSql.query(f"select c1, pow(c1 ,2) from {dbname}.ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,64.000000000)
tdSql.checkData(4 , 0 ,0)
tdSql.checkData(4 , 1 ,0.000000000)
- tdSql.query("select c1, pow(c1,2) from ct4")
+ tdSql.query(f"select c1, pow(c1,2) from {dbname}.ct4")
tdSql.checkData(0 , 0 , None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
@@ -377,45 +300,45 @@ class TDTestCase:
tdSql.checkData(5 , 1 ,None)
# mix with common functions
- tdSql.query("select c1, pow(c1 ,2),pow(c1,2), log(pow(c1,2) ,2) from ct4 ")
+ tdSql.query(f"select c1, pow(c1 ,2),pow(c1,2), log(pow(c1,2) ,2) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
-
+
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 ,36.000000000)
tdSql.checkData(3 , 2 ,36.000000000)
tdSql.checkData(3 , 3 ,5.169925001)
- tdSql.query("select c1, pow(c1,1),c5, floor(c5 ) from stb1 ")
+ tdSql.query(f"select c1, pow(c1,1),c5, floor(c5 ) from {dbname}.stb1 ")
        # # mix with agg functions, not supported
- tdSql.error("select c1, pow(c1 ,2),c5, count(c5) from stb1 ")
- tdSql.error("select c1, pow(c1 ,2),c5, count(c5) from ct1 ")
- tdSql.error("select pow(c1 ,2), count(c5) from stb1 ")
- tdSql.error("select pow(c1 ,2), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, pow(c1 ,2),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, pow(c1 ,2),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select pow(c1 ,2), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select pow(c1 ,2), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
+
-
# bug fix for count
- tdSql.query("select count(c1) from ct4 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,9)
- tdSql.query("select count(*) from ct4 ")
+ tdSql.query(f"select count(*) from {dbname}.ct4 ")
tdSql.checkData(0,0,12)
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,22)
- tdSql.query("select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0,0,25)
- # # bug fix for compute
- tdSql.query("select c1, pow(c1 ,2) -0 ,pow(c1-4 ,2)-0 from ct4 ")
+ # # bug fix for compute
+ tdSql.query(f"select c1, pow(c1 ,2) -0 ,pow(c1-4 ,2)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -423,7 +346,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 64.000000000)
tdSql.checkData(1, 2, 16.000000000)
- tdSql.query(" select c1, pow(c1 ,2) -0 ,pow(c1-0.1 ,2)-0.1 from ct4")
+ tdSql.query(f"select c1, pow(c1 ,2) -0 ,pow(c1-0.1 ,2)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -431,87 +354,86 @@ class TDTestCase:
tdSql.checkData(1, 1, 64.000000000)
tdSql.checkData(1, 2, 62.310000000)
- tdSql.query("select c1, pow(c1, -10), c2, pow(c2, -10), c3, pow(c3, -10) from ct1")
+ tdSql.query(f"select c1, pow(c1, -10), c2, pow(c2, -10), c3, pow(c3, -10) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, pow(c1, 100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, pow(c1, 10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def pow_base_test(self):
+ def pow_base_test(self, dbname="db"):
        # base is a regular number, int or double
- tdSql.query("select c1, pow(c1, 2) from ct1")
+ tdSql.query(f"select c1, pow(c1, 2) from {dbname}.ct1")
tdSql.checkData(0, 1,64.000000000)
- tdSql.query("select c1, pow(c1, 2.0) from ct1")
+ tdSql.query(f"select c1, pow(c1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 64.000000000)
- tdSql.query("select c1, pow(1, 2.0) from ct1")
+ tdSql.query(f"select c1, pow(1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 1.000000000)
tdSql.checkRows(13)
# # bug for compute in functions
- # tdSql.query("select c1, abs(1/0) from ct1")
+ # tdSql.query(f"select c1, abs(1/0) from {dbname}.ct1")
# tdSql.checkData(0, 0, 8)
# tdSql.checkData(0, 1, 1)
- tdSql.query("select c1, pow(1, 2.0) from ct1")
+ tdSql.query(f"select c1, pow(1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 1.000000000)
tdSql.checkRows(13)
# two cols start pow(x,y)
- tdSql.query("select c1,c2, pow(c1,c2) from ct1")
+ tdSql.query(f"select c1,c2, pow(c1,c2) from {dbname}.ct1")
tdSql.checkData(0, 2, None)
tdSql.checkData(1, 2, None)
tdSql.checkData(4, 2, 1.000000000)
- tdSql.query("select c1,c2, pow(c2,c1) from ct1")
+ tdSql.query(f"select c1,c2, pow(c2,c1) from {dbname}.ct1")
tdSql.checkData(0, 2, 3897131646727578700481513520437089271808.000000000)
tdSql.checkData(1, 2, 17217033054561120738612297152331776.000000000)
tdSql.checkData(4, 2, 1.000000000)
- tdSql.query("select c1, pow(2.0 , c1) from ct1")
+ tdSql.query(f"select c1, pow(2.0 , c1) from {dbname}.ct1")
tdSql.checkData(0, 1, 256.000000000)
tdSql.checkData(1, 1, 128.000000000)
tdSql.checkData(4, 1, 1.000000000)
- tdSql.query("select c1, pow(2.0 , c1) from ct1")
+ tdSql.query(f"select c1, pow(2.0 , c1) from {dbname}.ct1")
tdSql.checkData(0, 1, 256.000000000)
tdSql.checkData(1, 1, 128.000000000)
tdSql.checkData(4, 1, 1.000000000)
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -519,7 +441,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,64.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -527,7 +449,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,25.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -535,7 +457,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,25.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1 0 order by tbname " , "select pow(c5,2) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_pow2( " select c5 from stb1 where c1 > 0 order by tbname " , "select pow(c5,2) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_pow2( " select t1,c5 from stb1 order by ts " , "select pow(t1,2), pow(c5,2) from stb1 order by ts" )
- self.check_result_auto_pow2( " select t1,c5 from stb1 order by tbname " , "select pow(t1,2) ,pow(c5,2) from stb1 order by tbname" )
- self.check_result_auto_pow2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select pow(t1,2) ,pow(c5,2) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_pow2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select pow(t1,2) , pow(c5,2) from stb1 where c1 > 0 order by tbname" )
- pass
-
-
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 order by ts " , f"select pow(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 order by ts " , f"select pow(t1,2), pow(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select pow(t1,2) ,pow(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(t1,2) ,pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(t1,2) , pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
-
+
self.prepare_datas()
- tdLog.printNoPrefix("==========step2:test errors ==============")
+ tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
-
- tdLog.printNoPrefix("==========step3:support types ============")
+
+ tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
- tdLog.printNoPrefix("==========step4: pow basic query ============")
+ tdLog.printNoPrefix("==========step4: pow basic query ============")
self.basic_pow_function()
- tdLog.printNoPrefix("==========step5: big number pow query ============")
+ tdLog.printNoPrefix("==========step5: big number pow query ============")
self.test_big_number()
- tdLog.printNoPrefix("==========step6: base number for pow query ============")
+ tdLog.printNoPrefix("==========step6: base number for pow query ============")
self.pow_base_test()
- tdLog.printNoPrefix("==========step7: pow boundary query ============")
+ tdLog.printNoPrefix("==========step7: pow boundary query ============")
self.check_boundary_values()
- tdLog.printNoPrefix("==========step8: pow filter query ============")
+ tdLog.printNoPrefix("==========step8: pow filter query ============")
self.abs_func_filter()
tdLog.printNoPrefix("==========step9: check pow result of stable query ============")
- self.support_super_table_test()
+ self.support_super_table_test()
def stop(self):
tdSql.close()
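The pow.py rewrite collapses three near-identical checkers (`check_result_auto_pow2`, `check_result_auto_pow1`, `check_result_auto_pow__10`) into one `check_result_auto_pow(base, ...)`, and replaces the hand-rolled tolerance loop plus `sys.exit(1)` with `tdSql.checkData`. A minimal sketch of the expected-value computation it now performs, assuming result rows are lists of numbers or None:

    import math

    # None propagates unchanged; every other element is mapped through
    # float(pow(elem, base)), matching the consolidated checker above.
    def expected_pow_rows(rows, base):
        return [[None if elem is None else float(math.pow(elem, base))
                 for elem in row]
                for row in rows]

    # expected_pow_rows([[2, None, 3]], 2) -> [[4.0, None, 9.0]]

Delegating the comparison to `tdSql.checkData` also means a mismatch fails through the test framework instead of killing the process.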
diff --git a/tests/system-test/2-query/qnodeCluster.py b/tests/system-test/2-query/qnodeCluster.py
index f68eb58a7a..9e49bff938 100644
--- a/tests/system-test/2-query/qnodeCluster.py
+++ b/tests/system-test/2-query/qnodeCluster.py
@@ -13,9 +13,9 @@ from util.common import *
sys.path.append("./6-cluster/")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
-import threading
+import threading
class TDTestCase:
@@ -28,7 +28,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1):
tsql.execute("use %s" %dbName)
@@ -47,7 +47,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -55,7 +55,7 @@ class TDTestCase:
dbname="db_tsbs"
stabname1="readings"
stabname2="diagnostics"
- ctbnamePre1="rct"
+ ctbnamePre1="rct"
ctbnamePre2="dct"
ctbNums=40
self.ctbNums=ctbNums
@@ -73,7 +73,7 @@ class TDTestCase:
self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums)
- for j in range(ctbNums):
+ for j in range(ctbNums):
for i in range(rowNUms):
tdSql.execute(
f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
@@ -109,19 +109,19 @@ class TDTestCase:
def tsbsIotQuery(self,tdSql):
tdSql.execute("use db_tsbs")
-
+
# test interval and partition
tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
# print(tdSql.queryResult)
parRows=tdSql.queryRows
tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
tdSql.checkRows(parRows)
-
-
- # # test insert into
+
+
+ # # test insert into
# tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;")
# tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
-
+
# tdSql.query("insert into testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
@@ -141,7 +141,7 @@ class TDTestCase:
tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")
- # 2 stationary-trucks
+ # 2 stationary-trucks
tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)")
tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name")
@@ -156,7 +156,7 @@ class TDTestCase:
tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;")
- # # 6. avg-daily-driving-session
+ # # 6. avg-daily-driving-session
# #taosc core dumped
# tdSql.execute("create table random_measure2_1 (ts timestamp,ela float, name binary(40))")
# tdSql.query("SELECT ts,diff(mv) AS difka FROM (SELECT ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name,ts interval(10m) fill(value,0)) GROUP BY name,ts;")
@@ -166,7 +166,7 @@ class TDTestCase:
# 7. avg-load
tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;")
- # 8. daily-activity
+ # 8. daily-activity
tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
@@ -184,7 +184,7 @@ class TDTestCase:
tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")
-
+
#it's already supported:
# last-loc
tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;")
@@ -192,7 +192,7 @@ class TDTestCase:
#2. low-fuel
tdSql.query("SELECT last_row(ts),name,driver,fuel_state,driver FROM diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;")
-
+
# 3. avg-vs-projected-fuel-consumption
tdSql.query("select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from readings where velocity > 1 group by fleet")
@@ -213,16 +213,16 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
-
+
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
-
+
tdSql.query("select * from information_schema.ins_dnodes;")
tdLog.debug(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)
- tdLog.info("create database and stable")
+ tdLog.info("create database and stable")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
@@ -234,7 +234,7 @@ class TDTestCase:
for tr in threads:
tr.start()
- tdLog.info("Take turns stopping %s "%stopRole)
+ tdLog.info("Take turns stopping %s "%stopRole)
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
@@ -242,7 +242,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@@ -254,7 +254,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@@ -265,12 +265,12 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
-
+
for tr in threads:
tr.join()
- def run(self):
+ def run(self):
tdLog.printNoPrefix("==========step1:create database and table,insert data ==============")
self.createCluster()
self.prepareData()
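Besides trailing-whitespace cleanup, qnodeCluster.py switches to `tdSql.init(conn.cursor(), False)` so statements are no longer echoed to the log, and keeps its rolling-restart loop: dnodes are stopped and restarted one at a time while the query threads stay busy. A hedged sketch of that loop, assuming the `tdDnodes` interface used above:

    # Sketch: never take down more than one dnode at a time, so the
    # cluster stays available to the concurrently running query threads.
    def rolling_restart(tdDnodes, count, offset=0):
        for i in range(count):
            tdDnodes[i + offset].stoptaosd()   # stop one dnode
            tdDnodes[i + offset].starttaosd()  # restart it before the next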
diff --git a/tests/system-test/2-query/query_cols_tags_and_or.py b/tests/system-test/2-query/query_cols_tags_and_or.py
index e0fb986d79..af3fbb83c0 100644
--- a/tests/system-test/2-query/query_cols_tags_and_or.py
+++ b/tests/system-test/2-query/query_cols_tags_and_or.py
@@ -19,7 +19,7 @@ class TDTestCase:
def init(self, conn, logSql):
## add for TD-6672
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
+ tdSql.init(conn.cursor(), False)
def insertData(self, tb_name):
insert_sql_list = [f'insert into {tb_name} values ("2021-01-01 12:00:00", 1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1, 2, 3, 4)',
@@ -37,17 +37,17 @@ class TDTestCase:
for sql in insert_sql_list:
tdSql.execute(sql)
- def initTb(self):
- tdCom.cleanTb()
- tb_name = tdCom.getLongName(8, "letters")
+ def initTb(self, dbname="db"):
+ tdCom.cleanTb(dbname)
+ tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}'
tdSql.execute(
f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned)")
self.insertData(tb_name)
return tb_name
- def initStb(self, count=5):
- tdCom.cleanTb()
- tb_name = tdCom.getLongName(8, "letters")
+ def initStb(self, count=5, dbname="db"):
+ tdCom.cleanTb(dbname)
+ tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}'
tdSql.execute(
f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 tinyint unsigned, t11 smallint unsigned, t12 int unsigned, t13 bigint unsigned)")
for i in range(1, count+1):
@@ -56,9 +56,10 @@ class TDTestCase:
self.insertData(f'{tb_name}_sub_{i}')
return tb_name
- def initTwoStb(self):
- tdCom.cleanTb()
- tb_name = tdCom.getLongName(8, "letters")
+ def initTwoStb(self, dbname="db"):
+ tdCom.cleanTb(dbname)
+ tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}'
+ # tb_name = tdCom.getLongName(8, "letters")
tb_name1 = f'{tb_name}1'
tb_name2 = f'{tb_name}2'
tdSql.execute(
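query_cols_tags_and_or.py adopts the same convention as the files above: `initTb`, `initStb`, and `initTwoStb` take a `dbname` argument and build fully qualified table names up front, so no query depends on a prior `use db`. A sketch of the naming pattern, with `random_suffix` as a hypothetical stand-in for the repo's `tdCom.getLongName(8, "letters")`:

    import random
    import string

    # Hypothetical stand-in for tdCom.getLongName(8, "letters").
    def random_suffix(length=8):
        return "".join(random.choice(string.ascii_lowercase) for _ in range(length))

    def qualified_table(dbname="db"):
        return f"{dbname}.{random_suffix()}"   # e.g. "db.kqzwpfho"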
diff --git a/tests/system-test/2-query/round.py b/tests/system-test/2-query/round.py
index 551e225a4d..1d69d3c9af 100644
--- a/tests/system-test/2-query/round.py
+++ b/tests/system-test/2-query/round.py
@@ -8,49 +8,46 @@ from util.sql import *
from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
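The change running through this file is mechanical: every bare table reference (t1, stb1, ct1, ...) becomes dbname-qualified so the case no longer depends on a prior `use db`. A minimal standalone sketch of the pattern (the `qualify` helper is our illustration; the tests inline the f-strings directly):

```python
# Sketch: qualify table names with the database instead of relying on "use db".
# `qualify` and `round_query` are illustrative names, not from the test framework.

def qualify(dbname: str, table: str) -> str:
    """db-qualify a table name, e.g. qualify('db', 'ct1') -> 'db.ct1'."""
    return f"{dbname}.{table}"

def round_query(dbname: str = "db", table: str = "t1") -> str:
    return f"select round(c1) from {qualify(dbname, table)}"

assert round_query() == "select round(c1) from db.t1"
assert round_query("bound_test", "sub1_bound") == "select round(c1) from bound_test.sub1_bound"
```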
@@ -94,68 +91,68 @@ class TDTestCase:
else:
tdLog.info("round value check pass , it work as expected ,sql is \"%s\" "%round_query )
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select round from t1",
- # "select round(-+--+c1) from t1",
- # "select +-round(c1) from t1",
- # "select ++-round(c1) from t1",
- # "select ++--round(c1) from t1",
- # "select - -round(c1)*0 from t1",
- # "select round(tbname+1) from t1 ",
- "select round(123--123)==1 from t1",
- "select round(c1) as 'd1' from t1",
- "select round(c1 ,c2 ) from t1",
- "select round(c1 ,NULL) from t1",
- "select round(,) from t1;",
- "select round(round(c1) ab from t1)",
- "select round(c1) as int from t1",
- "select round from stb1",
- # "select round(-+--+c1) from stb1",
- # "select +-round(c1) from stb1",
- # "select ++-round(c1) from stb1",
- # "select ++--round(c1) from stb1",
- # "select - -round(c1)*0 from stb1",
- # "select round(tbname+1) from stb1 ",
- "select round(123--123)==1 from stb1",
- "select round(c1) as 'd1' from stb1",
- "select round(c1 ,c2 ) from stb1",
- "select round(c1 ,NULL) from stb1",
- "select round(,) from stb1;",
- "select round(round(c1) ab from stb1)",
- "select round(c1) as int from stb1"
+ f"select round from {dbname}.t1",
+ # f"select round(-+--+c1) from {dbname}.t1",
+ # f"select +-round(c1) from {dbname}.t1",
+ # f"select ++-round(c1) from {dbname}.t1",
+ # f"select ++--round(c1) from {dbname}.t1",
+ # f"select - -round(c1)*0 from {dbname}.t1",
+ # f"select round(tbname+1) from {dbname}.t1 ",
+ f"select round(123--123)==1 from {dbname}.t1",
+ f"select round(c1) as 'd1' from {dbname}.t1",
+ f"select round(c1 ,c2 ) from {dbname}.t1",
+ f"select round(c1 ,NULL) from {dbname}.t1",
+ f"select round(,) from {dbname}.t1;",
+ f"select round(round(c1) ab from {dbname}.t1)",
+ f"select round(c1) as int from {dbname}.t1",
+ f"select round from {dbname}.stb1",
+ # f"select round(-+--+c1) from {dbname}.stb1",
+ # f"select +-round(c1) from {dbname}.stb1",
+ # f"select ++-round(c1) from {dbname}.stb1",
+ # f"select ++--round(c1) from {dbname}.stb1",
+ # f"select - -round(c1)*0 from {dbname}.stb1",
+ # f"select round(tbname+1) from {dbname}.stb1 ",
+ f"select round(123--123)==1 from {dbname}.stb1",
+ f"select round(c1) as 'd1' from {dbname}.stb1",
+ f"select round(c1 ,c2 ) from {dbname}.stb1",
+ f"select round(c1 ,NULL) from {dbname}.stb1",
+ f"select round(,) from {dbname}.stb1;",
+ f"select round(round(c1) ab from {dbname}.stb1)",
+ f"select round(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select round(ts) from t1" ,
- "select round(c7) from t1",
- "select round(c8) from t1",
- "select round(c9) from t1",
- "select round(ts) from ct1" ,
- "select round(c7) from ct1",
- "select round(c8) from ct1",
- "select round(c9) from ct1",
- "select round(ts) from ct3" ,
- "select round(c7) from ct3",
- "select round(c8) from ct3",
- "select round(c9) from ct3",
- "select round(ts) from ct4" ,
- "select round(c7) from ct4",
- "select round(c8) from ct4",
- "select round(c9) from ct4",
- "select round(ts) from stb1" ,
- "select round(c7) from stb1",
- "select round(c8) from stb1",
- "select round(c9) from stb1" ,
+ f"select round(ts) from {dbname}.t1" ,
+ f"select round(c7) from {dbname}.t1",
+ f"select round(c8) from {dbname}.t1",
+ f"select round(c9) from {dbname}.t1",
+ f"select round(ts) from {dbname}.ct1" ,
+ f"select round(c7) from {dbname}.ct1",
+ f"select round(c8) from {dbname}.ct1",
+ f"select round(c9) from {dbname}.ct1",
+ f"select round(ts) from {dbname}.ct3" ,
+ f"select round(c7) from {dbname}.ct3",
+ f"select round(c8) from {dbname}.ct3",
+ f"select round(c9) from {dbname}.ct3",
+ f"select round(ts) from {dbname}.ct4" ,
+ f"select round(c7) from {dbname}.ct4",
+ f"select round(c8) from {dbname}.ct4",
+ f"select round(c9) from {dbname}.ct4",
+ f"select round(ts) from {dbname}.stb1" ,
+ f"select round(c7) from {dbname}.stb1",
+ f"select round(c8) from {dbname}.stb1",
+ f"select round(c9) from {dbname}.stb1" ,
- "select round(ts) from stbbb1" ,
- "select round(c7) from stbbb1",
+ f"select round(ts) from {dbname}.stbbb1" ,
+ f"select round(c7) from {dbname}.stbbb1",
- "select round(ts) from tbname",
- "select round(c9) from tbname"
+ f"select round(ts) from {dbname}.tbname",
+ f"select round(c9) from {dbname}.tbname"
]
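The split between this rejected-type list and the accepted type_sql_lists in the next hunk mirrors TDengine's type rules for round(): the numeric columns c1 through c6 pass, while timestamp (ts), bool (c7), binary (c8) and nchar (c9) error out. For the accepted types, SQL round() rounds halves away from zero (as in C's round(), and unlike Python's built-in round(), which rounds halves to even), so a naive Python-side cross-check disagrees on .5 values. A sketch, under that half-away-from-zero assumption:

```python
import math

def sql_round(x):
    # Round half away from zero, matching C round(); this is our assumption
    # about the server-side semantics, not something stated in the test file.
    if x is None:
        return None
    return math.copysign(math.floor(abs(x) + 0.5), x)

assert sql_round(2.5) == 3.0 and sql_round(-2.5) == -3.0
assert round(2.5) == 2   # Python's built-in rounds halves to even
```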
@@ -164,127 +161,127 @@ class TDTestCase:
type_sql_lists = [
- "select round(c1) from t1",
- "select round(c2) from t1",
- "select round(c3) from t1",
- "select round(c4) from t1",
- "select round(c5) from t1",
- "select round(c6) from t1",
+ f"select round(c1) from {dbname}.t1",
+ f"select round(c2) from {dbname}.t1",
+ f"select round(c3) from {dbname}.t1",
+ f"select round(c4) from {dbname}.t1",
+ f"select round(c5) from {dbname}.t1",
+ f"select round(c6) from {dbname}.t1",
- "select round(c1) from ct1",
- "select round(c2) from ct1",
- "select round(c3) from ct1",
- "select round(c4) from ct1",
- "select round(c5) from ct1",
- "select round(c6) from ct1",
+ f"select round(c1) from {dbname}.ct1",
+ f"select round(c2) from {dbname}.ct1",
+ f"select round(c3) from {dbname}.ct1",
+ f"select round(c4) from {dbname}.ct1",
+ f"select round(c5) from {dbname}.ct1",
+ f"select round(c6) from {dbname}.ct1",
- "select round(c1) from ct3",
- "select round(c2) from ct3",
- "select round(c3) from ct3",
- "select round(c4) from ct3",
- "select round(c5) from ct3",
- "select round(c6) from ct3",
+ f"select round(c1) from {dbname}.ct3",
+ f"select round(c2) from {dbname}.ct3",
+ f"select round(c3) from {dbname}.ct3",
+ f"select round(c4) from {dbname}.ct3",
+ f"select round(c5) from {dbname}.ct3",
+ f"select round(c6) from {dbname}.ct3",
- "select round(c1) from stb1",
- "select round(c2) from stb1",
- "select round(c3) from stb1",
- "select round(c4) from stb1",
- "select round(c5) from stb1",
- "select round(c6) from stb1",
+ f"select round(c1) from {dbname}.stb1",
+ f"select round(c2) from {dbname}.stb1",
+ f"select round(c3) from {dbname}.stb1",
+ f"select round(c4) from {dbname}.stb1",
+ f"select round(c5) from {dbname}.stb1",
+ f"select round(c6) from {dbname}.stb1",
- "select round(c6) as alisb from stb1",
- "select round(c6) alisb from stb1",
+ f"select round(c6) as alisb from {dbname}.stb1",
+ f"select round(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def basic_round_function(self):
+ def basic_round_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select round(c1) from ct3")
+ tdSql.query(f"select round(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c2) from ct3")
+ tdSql.query(f"select round(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c3) from ct3")
+ tdSql.query(f"select round(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c4) from ct3")
+ tdSql.query(f"select round(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c5) from ct3")
+ tdSql.query(f"select round(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c6) from ct3")
+ tdSql.query(f"select round(c6) from {dbname}.ct3")
# used for regular table
- tdSql.query("select round(c1) from t1")
+ tdSql.query(f"select round(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1)
tdSql.checkData(3 , 0, 3)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto( "select c1, c2, c3 , c4, c5 from t1", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from t1")
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.t1")
# used for sub table
- tdSql.query("select round(c1) from ct1")
+ tdSql.query(f"select round(c1) from {dbname}.ct1")
tdSql.checkData(0, 0, 8)
tdSql.checkData(1 , 0, 7)
tdSql.checkData(3 , 0, 5)
tdSql.checkData(5 , 0, 4)
- tdSql.query("select round(c1) from ct1")
- self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct1", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from ct1")
- self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from ct1;","select c1 from ct1" )
+ tdSql.query(f"select round(c1) from {dbname}.ct1")
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.ct1")
+ self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from {dbname}.ct1;",f"select c1 from {dbname}.ct1" )
# used for stable table
- tdSql.query("select round(c1) from stb1")
+ tdSql.query(f"select round(c1) from {dbname}.stb1")
tdSql.checkRows(25)
- self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct4 ", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from ct4")
- self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from ct4;" , "select c1 from ct4" )
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.ct4 ", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.ct4")
+ self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from {dbname}.ct4;" , f"select c1 from {dbname}.ct4" )
# used for nonexistent tables
- tdSql.error("select round(c1) from stbbb1")
- tdSql.error("select round(c1) from tbname")
- tdSql.error("select round(c1) from ct5")
+ tdSql.error(f"select round(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select round(c1) from {dbname}.tbname")
+ tdSql.error(f"select round(c1) from {dbname}.ct5")
# mix with common col
- tdSql.query("select c1, round(c1) from ct1")
+ tdSql.query(f"select c1, round(c1) from {dbname}.ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,8)
tdSql.checkData(4 , 0 ,0)
tdSql.checkData(4 , 1 ,0)
- tdSql.query("select c1, round(c1) from ct4")
+ tdSql.query(f"select c1, round(c1) from {dbname}.ct4")
tdSql.checkData(0 , 0 , None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
tdSql.checkData(4 , 1 ,5)
tdSql.checkData(5 , 0 ,None)
tdSql.checkData(5 , 1 ,None)
- tdSql.query("select c1, round(c1) from ct4 ")
+ tdSql.query(f"select c1, round(c1) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
tdSql.checkData(4 , 1 ,5)
# mix with common functions
- tdSql.query("select c1, round(c1),c5, round(c5) from ct4 ")
+ tdSql.query(f"select c1, round(c1),c5, round(c5) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
@@ -300,34 +297,34 @@ class TDTestCase:
tdSql.checkData(6 , 2 ,4.44000)
tdSql.checkData(6 , 3 ,4.00000)
- tdSql.query("select c1, round(c1),c5, round(c5) from stb1 ")
+ tdSql.query(f"select c1, round(c1),c5, round(c5) from {dbname}.stb1 ")
# mixing with agg functions is not supported
- tdSql.error("select c1, round(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, round(c1),c5, count(c5) from ct1 ")
- tdSql.error("select round(c1), count(c5) from stb1 ")
- tdSql.error("select round(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, round(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, round(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select round(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select round(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
# bug fix for count
- tdSql.query("select count(c1) from ct4 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,9)
- tdSql.query("select count(*) from ct4 ")
+ tdSql.query(f"select count(*) from {dbname}.ct4 ")
tdSql.checkData(0,0,12)
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,22)
- tdSql.query("select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0,0,25)
# bug fix for compute
- tdSql.query("select c1, abs(c1) -0 ,round(c1)-0 from ct4 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,round(c1)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -335,7 +332,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 8.000000000)
tdSql.checkData(1, 2, 8.000000000)
- tdSql.query(" select c1, abs(c1) -0 ,round(c1-0.1)-0.1 from ct4")
+ tdSql.query(f"select c1, abs(c1) -0 ,round(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -343,9 +340,8 @@ class TDTestCase:
tdSql.checkData(1, 1, 8.000000000)
tdSql.checkData(1, 2, 7.900000000)
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -353,7 +349,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,3.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -361,7 +357,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -369,7 +365,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) , round(abs(c1))-0.5 from ct4 where c1>log(c1,2) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) , round(abs(c1))-0.5 from {dbname}.ct4 where c1>log(c1,2) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,88888)
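The expected values in this hunk all follow from the first qualifying row, c1 = 8 (so c2 = 11111*8 = 88888): abs(8)-0 = 8.0, ceil(8-0.1) = 8, floor(8+0.1)-0.1 = 7.9, and ceil(log2(8)-0.5) = ceil(2.5) = 3. A standalone recomputation (sketch):

```python
import math

c1 = 8
assert abs(c1) - 0 == 8.0
assert math.ceil(c1 - 0.1) - 0 == 8
assert round(math.floor(c1 + 0.1) - 0.1, 9) == 7.9   # guard against float noise
assert math.ceil(math.log(c1, 2) - 0.5) == 3         # log2(8) = 3
```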
@@ -382,44 +378,42 @@ class TDTestCase:
def round_Arithmetic(self):
pass
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select round(c1), round(c2) ,round(c3), round(c4), round(c5) ,round(c6) from sub1_bound")
- self.check_result_auto( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select round(c1), round(c2) ,round(c3), round(c3), round(c2) ,round(c1) from sub1_bound")
- self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from sub1_bound;" , "select round(c1) from sub1_bound" )
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select round(c1), round(c2) ,round(c3), round(c4), round(c5) ,round(c6) from {dbname}.sub1_bound")
+ self.check_result_auto( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select round(c1), round(c2) ,round(c3), round(c3), round(c2) ,round(c1) from {dbname}.sub1_bound")
+ self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from {dbname}.sub1_bound;" , f"select round(c1) from {dbname}.sub1_bound" )
# check basic elem for table per row
- tdSql.query("select round(c1+0.2) ,round(c2) , round(c3+0.3) , round(c4-0.3), round(c5/2), round(c6/2) from sub1_bound ")
+ tdSql.query(f"select round(c1+0.2) ,round(c2) , round(c3+0.3) , round(c4-0.3), round(c5/2), round(c6/2) from {dbname}.sub1_bound ")
tdSql.checkData(0, 0, 2147483647.000000000)
tdSql.checkData(0, 2, 32767.000000000)
tdSql.checkData(0, 3, 127.000000000)
@@ -430,19 +424,18 @@ class TDTestCase:
tdSql.checkData(4, 3, -123.000000000)
tdSql.checkData(4, 4, -169499995645668991474575059260979281920.000000000)
- self.check_result_auto("select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from sub1_bound" ,"select round(c1+1) ,round(c2) , round(c3*1) , round(c4/2), round(c5)/2, round(c6) from sub1_bound ")
+ self.check_result_auto(f"select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from {dbname}.sub1_bound" ,f"select round(c1+1) ,round(c2) , round(c3*1) , round(c4/2), round(c5)/2, round(c6) from {dbname}.sub1_bound ")
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto( " select c5 from stb1 order by ts " , "select round(c5) from stb1 order by ts" )
- self.check_result_auto( " select c5 from stb1 order by tbname " , "select round(c5) from stb1 order by tbname" )
- self.check_result_auto( " select c5 from stb1 where c1 > 0 order by tbname " , "select round(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select c5 from stb1 where c1 > 0 order by tbname " , "select round(c5) from stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto( f"select c5 from {dbname}.stb1 order by ts " , f"select round(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto( f"select c5 from {dbname}.stb1 order by tbname " , f"select round(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select t1,c5 from stb1 order by ts " , "select round(t1), round(c5) from stb1 order by ts" )
- self.check_result_auto( " select t1,c5 from stb1 order by tbname " , "select round(t1) ,round(c5) from stb1 order by tbname" )
- self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select round(t1) ,round(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select round(t1) , round(c5) from stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select round(t1), round(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select round(t1) ,round(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(t1) ,round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(t1) , round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
pass
diff --git a/tests/system-test/2-query/rtrim.py b/tests/system-test/2-query/rtrim.py
index 30624792cc..80307e8534 100644
--- a/tests/system-test/2-query/rtrim.py
+++ b/tests/system-test/2-query/rtrim.py
@@ -120,16 +120,16 @@ class TDTestCase:
return sqls
- def __test_current(self): # sourcery skip: use-itertools-product
+ def __test_current(self, dbname="db"): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__rtrim_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__rtrim_err_check(tb):
@@ -142,17 +142,15 @@ class TDTestCase:
self.__test_error()
- def __create_tb(self):
- tdSql.prepare()
-
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -162,29 +160,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -200,7 +198,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -216,13 +214,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -251,8 +249,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/sample.py b/tests/system-test/2-query/sample.py
index 45be0ef8ab..7f1d7ab8c0 100644
--- a/tests/system-test/2-query/sample.py
+++ b/tests/system-test/2-query/sample.py
@@ -11,21 +11,17 @@
# -*- coding: utf-8 -*-
-from pstats import Stats
import sys
-import subprocess
import random
-import math
-import numpy as np
-import inspect
import re
-import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
+DBNAME = "db"
+
class TDTestCase:
def init(self, conn, logSql):
@@ -33,11 +29,11 @@ class TDTestCase:
tdSql.init(conn.cursor())
self.ts = 1537146000000
- def sample_query_form(self, sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""):
+ def sample_query_form(self, sel=f"select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""):
'''
sample function:
- :param sel: string, must be "select", required parameters;
+ :param sel: string, must be "select", required parameters;
:param func: string, in this case must be "sample(", otherwise return other function, required parameters;
:param col: string, column name, required parameters;
:param m_comm: string, comma between col and k , required parameters;
@@ -47,12 +43,12 @@ class TDTestCase:
:param fr: string, must be "from", required parameters;
:param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters;
:param condition: expression;
- :return: sample query statement,default: select sample(c1, 1) from t1
+ :return: sample query statement, default: select sample(c1, 1) from t1 (checksample qualifies table_expr with DBNAME)
'''
return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}"
- def checksample(self,sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""):
+ def checksample(self,sel=f"select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr=f"{DBNAME}.t1", condition=""):
# print(self.sample_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
# table_expr=table_expr, condition=condition))
line = sys._getframe().f_back.f_lineno
@@ -65,7 +61,7 @@ class TDTestCase:
))
- sql = "select * from t1"
+ sql = f"select * from {table_expr}"
collist = tdSql.getColNameList(sql)
if not isinstance(col, str):
@@ -125,7 +121,7 @@ class TDTestCase:
# table_expr=table_expr, condition=condition
# ))
- if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != "select"]):
+ if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != f"select"]):
print(f"case in {line}: ", end='')
return tdSql.error(self.sample_query_form(
sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
@@ -286,14 +282,14 @@ class TDTestCase:
return
else:
- if "where" in condition:
- condition = re.sub('where', f"where {col} is not null and ", condition)
- else:
- condition = f"where {col} is not null" + condition
- print(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
- tdSql.query(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
+ # if "where" in condition:
+ # condition = re.sub('where', f"where {col} is not null and ", condition)
+ # else:
+ # condition = f"where {col} is not null" + condition
+ # print(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
+ # tdSql.query(f"select _c0, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
# offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0
- pre_sample = tdSql.queryResult
+ # pre_sample = tdSql.queryResult
# pre_len = tdSql.queryRows
# for i in range(sample_len):
# if sample_result[pre_row:pre_row + step][i] not in pre_sample:
@@ -301,7 +297,7 @@ class TDTestCase:
# else:
# tdLog.info(f"case in {line} is success: sample data is in {group_name}")
- pass
+ pass
def sample_current_query(self) :
@@ -322,24 +318,24 @@ class TDTestCase:
self.checksample(**case6)
# # case7~8: nested query
- # case7 = {"table_expr": "(select c1 from stb1)"}
- # self.checksample(**case7)
- # case8 = {"table_expr": "(select sample(c1, 1) c1 from stb1 group by tbname)"}
- # self.checksample(**case8)
+ case7 = {"table_expr": f"(select c1 from {DBNAME}.stb1)"}
+ self.checksample(**case7)
+ case8 = {"table_expr": f"(select sample(c1, 1) c1 from {DBNAME}.stb1 group by tbname)"}
+ self.checksample(**case8)
# case9~10: mix with tbname/ts/tag/col
- # case9 = {"alias": ", tbname"}
- # self.checksample(**case9)
- # case10 = {"alias": ", _c0"}
- # self.checksample(**case10)
- # case11 = {"alias": ", st1"}
+ case9 = {"alias": ", tbname"}
+ self.checksample(**case9)
+ case10 = {"alias": ", _c0"}
+ self.checksample(**case10)
+ case11 = {"alias": ", st1"}
# self.checksample(**case11)
- tdSql.query("select sample( c1 , 1 ) , st1 from t1")
+ tdSql.query(f"select sample( c1 , 1 ) , st1 from {DBNAME}.t1")
- # case12 = {"alias": ", c1"}
+ case12 = {"alias": ", c1"}
# self.checksample(**case12)
- tdSql.query("select sample( c1 , 1 ) , c1 from t1")
+ tdSql.query(f"select sample( c1 , 1 ) , c1 from {DBNAME}.t1")
# case13~15: with single condition
case13 = {"condition": "where c1 <= 10"}
@@ -353,32 +349,31 @@ class TDTestCase:
case16 = {"condition": "where c6=1 or c6 =0"}
self.checksample(**case16)
- # # case17: only support normal table join
- # case17 = {
- # "col": "t1.c1",
- # "table_expr": "t1, t2",
- # "condition": "where t1.ts=t2.ts"
- # }
- # self.checksample(**case17)
- # # case18~19: with group by
- # case19 = {
- # "table_expr": "stb1",
- # "condition": "partition by tbname"
- # }
+ # case17: only support normal table join
+ case17 = {
+ "col": "t1.c1",
+ "table_expr": f"{DBNAME}.t1 t1 join {DBNAME}.t2 t2 on t1.ts = t2.ts",
+ }
+ self.checksample(**case17)
+ # case18~19: with group by
+ case19 = {
+ "table_expr": f"{DBNAME}.stb1",
+ "condition": "partition by tbname"
+ }
# self.checksample(**case19)
- # # case20~21: with order by
- # case20 = {"condition": "order by ts"}
+ # case20~21: with order by
+ case20 = {"condition": "order by ts"}
# self.checksample(**case20)
- # case21 = {
- # "table_expr": "stb1",
- # "condition": "partition by tbname order by tbname"
- # }
+ case21 = {
+ "table_expr": f"{DBNAME}.stb1",
+ "condition": "partition by tbname order by tbname"
+ }
# self.checksample(**case21)
# case22: with union
case22 = {
- "condition": "union all select sample( c1 , 1 ) from t2"
+ "condition": f"union all select sample( c1 , 1 ) from {DBNAME}.t2"
}
self.checksample(**case22)
@@ -396,12 +391,12 @@ class TDTestCase:
case26 = {"k": 1000}
self.checksample(**case26)
case27 = {
- "table_expr": "stb1",
+ "table_expr": f"{DBNAME}.stb1",
"condition": "group by tbname slimit 1 "
}
self.checksample(**case27) # with slimit
case28 = {
- "table_expr": "stb1",
+ "table_expr": f"{DBNAME}.stb1",
"condition": "group by tbname slimit 1 soffset 1"
}
self.checksample(**case28) # with soffset
@@ -431,7 +426,7 @@ class TDTestCase:
# err9 = {"col": "st1"}
# self.checksample(**err9) # col: tag
- tdSql.query(" select sample(st1 ,1) from t1 ")
+ tdSql.query(f"select sample(st1 ,1) from {DBNAME}.t1 ")
# err10 = {"col": 1}
# self.checksample(**err10) # col: value
# err11 = {"col": "NULL"}
@@ -494,13 +489,13 @@ class TDTestCase:
self.checksample(**err39) # mix with calculation function 2
# err40 = {"alias": "+ 2"}
# self.checksample(**err40) # mix with arithmetic 1
- # tdSql.query(" select sample(c1 , 1) + 2 from t1 ")
+ # tdSql.query(f"select sample(c1 , 1) + 2 from {dbname}.t1 ")
err41 = {"alias": "+ avg(c1)"}
# self.checksample(**err41) # mix with arithmetic 2
# err42 = {"alias": ", c1"}
# self.checksample(**err42)
- tdSql.query("select sample( c1 , 1 ) , c1 from t1")
+ tdSql.query(f"select sample( c1 , 1 ) , c1 from {DBNAME}.t1")
# mix with other col
# err43 = {"table_expr": "stb1"}
# self.checksample(**err43) # select stb directly
@@ -510,14 +505,14 @@ class TDTestCase:
# "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
# }
# self.checksample(**err44) # stb join
- tdSql.query("select sample( stb1.c1 , 1 ) from stb1, stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts")
+ tdSql.query(f"select sample( stb1.c1 , 1 ) from {DBNAME}.stb1 stb1, {DBNAME}.stb2 stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts")
# err45 = {
# "condition": "where ts>0 and ts < now interval(1h) fill(next)"
# }
# self.checksample(**err45) # interval
- tdSql.error("select sample( c1 , 1 ) from t1 where ts>0 and ts < now interval(1h) fill(next)")
+ tdSql.error(f"select sample( c1 , 1 ) from {DBNAME}.t1 where ts>0 and ts < now interval(1h) fill(next)")
err46 = {
- "table_expr": "t1",
+ "table_expr": f"{DBNAME}.t1",
"condition": "group by c6"
}
# self.checksample(**err46) # group by normal col
@@ -563,49 +558,45 @@ class TDTestCase:
pass
- def sample_test_data(self, tbnum:int, data_row:int, basetime:int) -> None :
+ def sample_test_data(self, tbnum:int, data_row:int, basetime:int, dbname="db") -> None :
for i in range(tbnum):
for j in range(data_row):
tdSql.execute(
- f"insert into t{i} values ("
+ f"insert into {dbname}.t{i} values ("
f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
)
tdSql.execute(
- f"insert into t{i} values ("
+ f"insert into {dbname}.t{i} values ("
f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
)
tdSql.execute(
- f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+ f"insert into {dbname}.tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
)
pass
- def sample_test_table(self,tbnum: int) -> None :
- tdSql.execute("drop database if exists db")
- tdSql.execute("create database if not exists db keep 3650")
- tdSql.execute("use db")
+ def sample_test_table(self,tbnum: int, dbname="db") -> None :
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650")
tdSql.execute(
- "create stable db.stb1 (\
+ f"create stable {dbname}.stb1 (\
ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
) \
tags(st1 int)"
)
tdSql.execute(
- "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+ f"create stable {dbname}.stb2 (ts timestamp, c1 int) tags(st2 int)"
)
for i in range(tbnum):
- tdSql.execute(f"create table t{i} using stb1 tags({i})")
- tdSql.execute(f"create table tt{i} using stb2 tags({i})")
-
- pass
-
+ tdSql.execute(f"create table {dbname}.t{i} using {dbname}.stb1 tags({i})")
+ tdSql.execute(f"create table {dbname}.tt{i} using {dbname}.stb2 tags({i})")
def check_sample(self , sample_query , origin_query ):
@@ -626,45 +617,43 @@ class TDTestCase:
else:
tdLog.exit(" sample data is not in datas groups ,failed sql is : %s" % sample_query )
-
- def basic_sample_query(self):
- tdSql.execute(" drop database if exists db ")
- tdSql.execute(" create database if not exists db duration 300d ")
- tdSql.execute(" use db ")
+ def basic_sample_query(self, dbname="db"):
+ tdSql.execute(f" drop database if exists {dbname} ")
+ tdSql.execute(f" create database if not exists {dbname} duration 300d ")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -683,116 +672,116 @@ class TDTestCase:
# basic query for sample
# params test for all
- tdSql.error(" select sample(c1,c1) from t1 ")
- tdSql.error(" select sample(c1,now) from t1 ")
- tdSql.error(" select sample(c1,tbname) from t1 ")
- tdSql.error(" select sample(c1,ts) from t1 ")
- tdSql.error(" select sample(c1,false) from t1 ")
- tdSql.query(" select sample(123,1) from t1 ")
+ tdSql.error(f"select sample(c1,c1) from {dbname}.t1 ")
+ tdSql.error(f"select sample(c1,now) from {dbname}.t1 ")
+ tdSql.error(f"select sample(c1,tbname) from {dbname}.t1 ")
+ tdSql.error(f"select sample(c1,ts) from {dbname}.t1 ")
+ tdSql.error(f"select sample(c1,false) from {dbname}.t1 ")
+ tdSql.query(f"select sample(123,1) from {dbname}.t1 ")
- tdSql.query(" select sample(c1,2) from t1 ")
+ tdSql.query(f"select sample(c1,2) from {dbname}.t1 ")
tdSql.checkRows(2)
- tdSql.query(" select sample(c1,10) from t1 ")
+ tdSql.query(f"select sample(c1,10) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c8,10) from t1 ")
+ tdSql.query(f"select sample(c8,10) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c1,999) from t1 ")
+ tdSql.query(f"select sample(c1,999) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c1,1000) from t1 ")
+ tdSql.query(f"select sample(c1,1000) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c8,1000) from t1 ")
+ tdSql.query(f"select sample(c8,1000) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.error(" select sample(c1,-1) from t1 ")
+ tdSql.error(f"select sample(c1,-1) from {dbname}.t1 ")
# bug need fix
- # tdSql.query("select sample(c1 ,2) , 123 from stb1;")
+ # tdSql.query(f"select sample(c1 ,2) , 123 from {dbname}.stb1;")
# all type support
- tdSql.query(" select sample(c1 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c1 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c2 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c2 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c3 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c3 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c4 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c4 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c5 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c5 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c6 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c6 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c7 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c7 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c8 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c8 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c9 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c9 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c10 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c10 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- # tdSql.query(" select sample(t1 , 20 ) from ct1 ")
+ # tdSql.query(f"select sample(t1 , 20 ) from {dbname}.ct1 ")
# tdSql.checkRows(13)
# filter data
- tdSql.query(" select sample(c1, 20 ) from t1 where c1 is null ")
+ tdSql.query(f"select sample(c1, 20 ) from {dbname}.t1 where c1 is null ")
tdSql.checkRows(1)
- tdSql.query(" select sample(c1, 20 ) from t1 where c1 =6 ")
+ tdSql.query(f"select sample(c1, 20 ) from {dbname}.t1 where c1 =6 ")
tdSql.checkRows(1)
- tdSql.query(" select sample(c1, 20 ) from t1 where c1 > 6 ")
+ tdSql.query(f"select sample(c1, 20 ) from {dbname}.t1 where c1 > 6 ")
tdSql.checkRows(3)
- self.check_sample("select sample(c1, 20 ) from t1 where c1 > 6" , "select c1 from t1 where c1 > 6")
+ self.check_sample(f"select sample(c1, 20 ) from {dbname}.t1 where c1 > 6" , f"select c1 from {dbname}.t1 where c1 > 6")
- tdSql.query(" select sample( c1 , 1 ) from t1 where c1 in (0, 1,2) ")
+ tdSql.query(f"select sample( c1 , 1 ) from {dbname}.t1 where c1 in (0, 1,2) ")
tdSql.checkRows(1)
- tdSql.query("select sample( c1 ,3 ) from t1 where c1 between 1 and 10 ")
+ tdSql.query(f"select sample( c1 ,3 ) from {dbname}.t1 where c1 between 1 and 10 ")
tdSql.checkRows(3)
- self.check_sample("select sample( c1 ,3 ) from t1 where c1 between 1 and 10" ,"select c1 from t1 where c1 between 1 and 10")
+ self.check_sample(f"select sample( c1 ,3 ) from {dbname}.t1 where c1 between 1 and 10" ,f"select c1 from {dbname}.t1 where c1 between 1 and 10")
# join
- tdSql.query("select sample( ct4.c1 , 1 ) from ct1, ct4 where ct4.ts=ct1.ts")
+ tdSql.query(f"select sample( ct4.c1 , 1 ) from {dbname}.ct1 ct1, {dbname}.ct4 ct4 where ct4.ts=ct1.ts")
# partition by tbname
- tdSql.query("select sample(c1,2) from stb1 partition by tbname")
+ tdSql.query(f"select sample(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
- self.check_sample("select sample(c1,2) from stb1 partition by tbname" , "select c1 from stb1 partition by tbname")
+ self.check_sample(f"select sample(c1,2) from {dbname}.stb1 partition by tbname" , f"select c1 from {dbname}.stb1 partition by tbname")
# nest query
- # tdSql.query("select sample(c1,2) from (select c1 from t1); ")
+ # tdSql.query(f"select sample(c1,2) from (select c1 from {dbname}.t1); ")
# tdSql.checkRows(2)
# union all
- tdSql.query("select sample(c1,2) from t1 union all select sample(c1,3) from t1")
+ tdSql.query(f"select sample(c1,2) from {dbname}.t1 union all select sample(c1,3) from {dbname}.t1")
tdSql.checkRows(5)
# fill interval
# not support mix with other function
- tdSql.error("select top(c1,2) , sample(c1,2) from ct1")
- tdSql.error("select max(c1) , sample(c1,2) from ct1")
- tdSql.query("select c1 , sample(c1,2) from ct1")
+ tdSql.error(f"select top(c1,2) , sample(c1,2) from {dbname}.ct1")
+ tdSql.error(f"select max(c1) , sample(c1,2) from {dbname}.ct1")
+ tdSql.query(f"select c1 , sample(c1,2) from {dbname}.ct1")
# bug for mix with scalar
- tdSql.query("select 123 , sample(c1,100) from ct1")
- tdSql.query("select sample(c1,100)+2 from ct1")
- tdSql.query("select abs(sample(c1,100)) from ct1")
+ tdSql.query(f"select 123 , sample(c1,100) from {dbname}.ct1")
+ tdSql.query(f"select sample(c1,100)+2 from {dbname}.ct1")
+ tdSql.query(f"select abs(sample(c1,100)) from {dbname}.ct1")
- def sample_test_run(self) :
+ def sample_test_run(self, dbname="db") :
tdLog.printNoPrefix("==========support sample function==========")
tbnum = 10
nowtime = int(round(time.time() * 1000))
@@ -805,28 +794,28 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert only NULL test:")
for i in range(tbnum):
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime - 5})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime + 5})")
self.sample_current_query()
self.sample_error_query()
tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):")
- # self.sample_test_table(tbnum)
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
- # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
- # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
- # self.sample_current_query()
- # self.sample_error_query()
+ self.sample_test_table(tbnum)
+ tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
+ tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
+ self.sample_current_query()
+ self.sample_error_query()
tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):")
- # self.sample_test_table(tbnum)
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
- # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})")
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
- # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})")
- # self.sample_current_query()
- # self.sample_error_query()
+ self.sample_test_table(tbnum)
+ tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})")
+ tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})")
+ self.sample_current_query()
+ self.sample_error_query()
tdLog.printNoPrefix("######## insert data without NULL data test:")
self.sample_test_table(tbnum)
@@ -837,16 +826,16 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert data mix with NULL test:")
for i in range(tbnum):
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
self.sample_current_query()
self.sample_error_query()
tdLog.printNoPrefix("######## check after WAL test:")
- tdSql.query("select * from information_schema.ins_dnodes")
+ tdSql.query(f"select * from information_schema.ins_dnodes")
index = tdSql.getData(0, 0)
tdDnodes.stop(index)
tdDnodes.start(index)
@@ -855,19 +844,19 @@ class TDTestCase:
self.basic_sample_query()
- def sample_big_data(self):
- tdSql.execute("create database sample_db")
+ def sample_big_data(self, dbname="sample_db"):
+ tdSql.execute(f"create database {dbname}")
tdSql.execute("use sample_db")
- tdSql.execute("create stable st (ts timestamp ,c1 int ) tags(ind int)" )
- tdSql.execute("create table sub_tb using st tags(1)")
+ tdSql.execute(f"create stable {dbname}.st (ts timestamp ,c1 int ) tags(ind int)" )
+ tdSql.execute(f"create table {dbname}.sub_tb using {dbname}.st tags(1)")
for i in range(2000):
ts = self.ts+i*10
- tdSql.execute(f"insert into sub_tb values({ts} ,{i})")
+ tdSql.execute(f"insert into {dbname}.sub_tb values({ts} ,{i})")
- tdSql.query("select count(*) from st")
+ tdSql.query(f"select count(*) from {dbname}.st")
tdSql.checkData(0,0,2000)
- tdSql.query("select sample(c1 ,1000) from st")
+ tdSql.query(f"select sample(c1 ,1000) from {dbname}.st")
tdSql.checkRows(1000)
# bug need fix
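These checks pin down sample()'s cardinality: sample(expr, k) returns k randomly chosen rows, capped at the rows available, which is why sample(c1, 1000) over 2000 rows yields exactly 1000 rows while sample(c1, 10) over a 9-row table yields 9. A standalone sketch of that contract via reservoir sampling (our illustration of the semantics, not server code):

```python
import random

def sample(values, k):
    """Uniformly sample up to k items in one pass (reservoir sampling)."""
    reservoir = []
    for i, v in enumerate(values):
        if i < k:
            reservoir.append(v)
        elif (j := random.randint(0, i)) < k:
            reservoir[j] = v
    return reservoir

assert len(sample(range(9), 1000)) == 9        # capped at available rows
assert len(sample(range(2000), 1000)) == 1000
```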
diff --git a/tests/system-test/2-query/sin.py b/tests/system-test/2-query/sin.py
index 7cb559c510..a1ba335487 100644
--- a/tests/system-test/2-query/sin.py
+++ b/tests/system-test/2-query/sin.py
@@ -9,48 +9,46 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
-
- def prepare_datas(self):
+
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
-
+
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -65,14 +63,15 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
-
+
def check_result_auto_sin(self ,origin_query , pow_query):
pow_result = tdSql.getResult(pow_query)
+
origin_result = tdSql.getResult(origin_query)
auto_result =[]
-
+
for row in origin_result:
row_check = []
for elem in row:
@@ -82,190 +81,179 @@ class TDTestCase:
elem = math.sin(elem)
row_check.append(elem)
auto_result.append(row_check)
-
- check_status = True
-
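+ # re-issue the query through tdSql so checkData() can compare each cell against the Python-side math.sin() result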
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("sin function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("sin value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def test_errors(self):
+ tdSql.checkData(row_index ,col_index ,auto_result[row_index][col_index])
+
+
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select sin from t1",
- # "select sin(-+--+c1 ) from t1",
- # "select +-sin(c1) from t1",
- # "select ++-sin(c1) from t1",
- # "select ++--sin(c1) from t1",
- # "select - -sin(c1)*0 from t1",
- # "select sin(tbname+1) from t1 ",
- "select sin(123--123)==1 from t1",
- "select sin(c1) as 'd1' from t1",
- "select sin(c1 ,c2) from t1",
- "select sin(c1 ,NULL ) from t1",
- "select sin(,) from t1;",
- "select sin(sin(c1) ab from t1)",
- "select sin(c1 ) as int from t1",
- "select sin from stb1",
- # "select sin(-+--+c1) from stb1",
- # "select +-sin(c1) from stb1",
- # "select ++-sin(c1) from stb1",
- # "select ++--sin(c1) from stb1",
- # "select - -sin(c1)*0 from stb1",
- # "select sin(tbname+1) from stb1 ",
- "select sin(123--123)==1 from stb1",
- "select sin(c1) as 'd1' from stb1",
- "select sin(c1 ,c2 ) from stb1",
- "select sin(c1 ,NULL) from stb1",
- "select sin(,) from stb1;",
- "select sin(sin(c1) ab from stb1)",
- "select sin(c1) as int from stb1"
+ f"select sin from {dbname}.t1",
+ # f"select sin(-+--+c1 ) from {dbname}.t1",
+ # f"select +-sin(c1) from {dbname}.t1",
+ # f"select ++-sin(c1) from {dbname}.t1",
+ # f"select ++--sin(c1) from {dbname}.t1",
+ # f"select - -sin(c1)*0 from {dbname}.t1",
+ # f"select sin(tbname+1) from {dbname}.t1 ",
+ f"select sin(123--123)==1 from {dbname}.t1",
+ f"select sin(c1) as 'd1' from {dbname}.t1",
+ f"select sin(c1 ,c2) from {dbname}.t1",
+ f"select sin(c1 ,NULL ) from {dbname}.t1",
+ f"select sin(,) from {dbname}.t1;",
+ f"select sin(sin(c1) ab from {dbname}.t1)",
+ f"select sin(c1 ) as int from {dbname}.t1",
+ f"select sin from {dbname}.stb1",
+ # f"select sin(-+--+c1) from {dbname}.stb1",
+ # f"select +-sin(c1) from {dbname}.stb1",
+ # f"select ++-sin(c1) from {dbname}.stb1",
+ # f"select ++--sin(c1) from {dbname}.stb1",
+ # f"select - -sin(c1)*0 from {dbname}.stb1",
+ # f"select sin(tbname+1) from {dbname}.stb1 ",
+ f"select sin(123--123)==1 from {dbname}.stb1",
+ f"select sin(c1) as 'd1' from {dbname}.stb1",
+ f"select sin(c1 ,c2 ) from {dbname}.stb1",
+ f"select sin(c1 ,NULL) from {dbname}.stb1",
+ f"select sin(,) from {dbname}.stb1;",
+ f"select sin(sin(c1) ab from {dbname}.stb1)",
+ f"select sin(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
-
- def support_types(self):
+
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select sin(ts) from t1" ,
- "select sin(c7) from t1",
- "select sin(c8) from t1",
- "select sin(c9) from t1",
- "select sin(ts) from ct1" ,
- "select sin(c7) from ct1",
- "select sin(c8) from ct1",
- "select sin(c9) from ct1",
- "select sin(ts) from ct3" ,
- "select sin(c7) from ct3",
- "select sin(c8) from ct3",
- "select sin(c9) from ct3",
- "select sin(ts) from ct4" ,
- "select sin(c7) from ct4",
- "select sin(c8) from ct4",
- "select sin(c9) from ct4",
- "select sin(ts) from stb1" ,
- "select sin(c7) from stb1",
- "select sin(c8) from stb1",
- "select sin(c9) from stb1" ,
+ f"select sin(ts) from {dbname}.t1" ,
+ f"select sin(c7) from {dbname}.t1",
+ f"select sin(c8) from {dbname}.t1",
+ f"select sin(c9) from {dbname}.t1",
+ f"select sin(ts) from {dbname}.ct1" ,
+ f"select sin(c7) from {dbname}.ct1",
+ f"select sin(c8) from {dbname}.ct1",
+ f"select sin(c9) from {dbname}.ct1",
+ f"select sin(ts) from {dbname}.ct3" ,
+ f"select sin(c7) from {dbname}.ct3",
+ f"select sin(c8) from {dbname}.ct3",
+ f"select sin(c9) from {dbname}.ct3",
+ f"select sin(ts) from {dbname}.ct4" ,
+ f"select sin(c7) from {dbname}.ct4",
+ f"select sin(c8) from {dbname}.ct4",
+ f"select sin(c9) from {dbname}.ct4",
+ f"select sin(ts) from {dbname}.stb1" ,
+ f"select sin(c7) from {dbname}.stb1",
+ f"select sin(c8) from {dbname}.stb1",
+ f"select sin(c9) from {dbname}.stb1" ,
- "select sin(ts) from stbbb1" ,
- "select sin(c7) from stbbb1",
+ f"select sin(ts) from {dbname}.stbbb1" ,
+ f"select sin(c7) from {dbname}.stbbb1",
- "select sin(ts) from tbname",
- "select sin(c9) from tbname"
+ f"select sin(ts) from {dbname}.tbname",
+ f"select sin(c9) from {dbname}.tbname"
]
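+ # ts, c7, c8 and c9 are timestamp, bool, binary and nchar columns; sin() must reject them all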
-
+
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
-
-
+
+
type_sql_lists = [
- "select sin(c1) from t1",
- "select sin(c2) from t1",
- "select sin(c3) from t1",
- "select sin(c4) from t1",
- "select sin(c5) from t1",
- "select sin(c6) from t1",
+ f"select sin(c1) from {dbname}.t1",
+ f"select sin(c2) from {dbname}.t1",
+ f"select sin(c3) from {dbname}.t1",
+ f"select sin(c4) from {dbname}.t1",
+ f"select sin(c5) from {dbname}.t1",
+ f"select sin(c6) from {dbname}.t1",
- "select sin(c1) from ct1",
- "select sin(c2) from ct1",
- "select sin(c3) from ct1",
- "select sin(c4) from ct1",
- "select sin(c5) from ct1",
- "select sin(c6) from ct1",
+ f"select sin(c1) from {dbname}.ct1",
+ f"select sin(c2) from {dbname}.ct1",
+ f"select sin(c3) from {dbname}.ct1",
+ f"select sin(c4) from {dbname}.ct1",
+ f"select sin(c5) from {dbname}.ct1",
+ f"select sin(c6) from {dbname}.ct1",
- "select sin(c1) from ct3",
- "select sin(c2) from ct3",
- "select sin(c3) from ct3",
- "select sin(c4) from ct3",
- "select sin(c5) from ct3",
- "select sin(c6) from ct3",
+ f"select sin(c1) from {dbname}.ct3",
+ f"select sin(c2) from {dbname}.ct3",
+ f"select sin(c3) from {dbname}.ct3",
+ f"select sin(c4) from {dbname}.ct3",
+ f"select sin(c5) from {dbname}.ct3",
+ f"select sin(c6) from {dbname}.ct3",
- "select sin(c1) from stb1",
- "select sin(c2) from stb1",
- "select sin(c3) from stb1",
- "select sin(c4) from stb1",
- "select sin(c5) from stb1",
- "select sin(c6) from stb1",
+ f"select sin(c1) from {dbname}.stb1",
+ f"select sin(c2) from {dbname}.stb1",
+ f"select sin(c3) from {dbname}.stb1",
+ f"select sin(c4) from {dbname}.stb1",
+ f"select sin(c5) from {dbname}.stb1",
+ f"select sin(c6) from {dbname}.stb1",
- "select sin(c6) as alisb from stb1",
- "select sin(c6) alisb from stb1",
+ f"select sin(c6) as alisb from {dbname}.stb1",
+ f"select sin(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
-
- def basic_sin_function(self):
- # basic query
- tdSql.query("select c1 from ct3")
+ def basic_sin_function(self, dbname="db"):
+
+ # basic query
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select sin(c1) from ct3")
+ tdSql.query(f"select sin(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c2) from ct3")
+ tdSql.query(f"select sin(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c3) from ct3")
+ tdSql.query(f"select sin(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c4) from ct3")
+ tdSql.query(f"select sin(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c5) from ct3")
+ tdSql.query(f"select sin(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c6) from ct3")
+ tdSql.query(f"select sin(c6) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select sin(c1) from t1")
+ tdSql.query(f"select sin(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 0.841470985)
tdSql.checkData(3 , 0, 0.141120008)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_sin( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from t1")
-
+ self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from {dbname}.t1")
+
# used for sub table
- tdSql.query("select c2 ,sin(c2) from ct1")
+ tdSql.query(f"select c2 ,sin(c2) from {dbname}.ct1")
tdSql.checkData(0, 1, -0.220708349)
tdSql.checkData(1 , 1, -0.556921845)
tdSql.checkData(3 , 1, -0.798311364)
tdSql.checkData(4 , 1, 0.000000000)
- tdSql.query("select c1, c5 ,sin(c5) from ct4")
+ tdSql.query(f"select c1, c5 ,sin(c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, 0.518228108)
tdSql.checkData(2 , 2, 0.996475613)
tdSql.checkData(3 , 2, 0.367960369)
tdSql.checkData(5 , 2, None)
- self.check_result_auto_sin( "select c1, c2, c3 , c4, c5 from ct1", "select sin(c1), sin(c2) ,sin(c3), sin(c4), sin(c5) from ct1")
-
+ self.check_result_auto_sin( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select sin(c1), sin(c2) ,sin(c3), sin(c4), sin(c5) from {dbname}.ct1")
+
# nest query for sin functions
- tdSql.query("select c4 , sin(c4) ,sin(sin(c4)) , sin(sin(sin(c4))) from ct1;")
+ tdSql.query(f"select c4 , sin(c4) ,sin(sin(c4)) , sin(sin(sin(c4))) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 88)
tdSql.checkData(0 , 1 , 0.035398303)
tdSql.checkData(0 , 2 , 0.035390911)
@@ -281,52 +269,52 @@ class TDTestCase:
tdSql.checkData(11 , 2 , 0.841042171)
tdSql.checkData(11 , 3 , 0.745338326)
- # used for stable table
-
- tdSql.query("select sin(c1) from stb1")
+ # used for stable table
+
+ tdSql.query(f"select sin(c1) from {dbname}.stb1")
tdSql.checkRows(25)
-
+
# used for not exists table
- tdSql.error("select sin(c1) from stbbb1")
- tdSql.error("select sin(c1) from tbname")
- tdSql.error("select sin(c1) from ct5")
+ tdSql.error(f"select sin(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select sin(c1) from {dbname}.tbname")
+ tdSql.error(f"select sin(c1) from {dbname}.ct5")
- # mix with common col
- tdSql.query("select c1, sin(c1) from ct1")
- tdSql.query("select c2, sin(c2) from ct4")
-
+
+ # mix with common col
+ tdSql.query(f"select c1, sin(c1) from {dbname}.ct1")
+ tdSql.query(f"select c2, sin(c2) from {dbname}.ct4")
# mix with common functions
- tdSql.query("select c1, sin(c1),sin(c1), sin(sin(c1)) from ct4 ")
+ tdSql.query(f"select c1, sin(c1),sin(c1), sin(sin(c1)) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
-
+
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 ,-0.279415498)
tdSql.checkData(3 , 2 ,-0.279415498)
tdSql.checkData(3 , 3 ,-0.275793863)
- tdSql.query("select c1, sin(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, sin(c1),c5, floor(c5) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, sin(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, sin(c1),c5, count(c5) from ct1 ")
- tdSql.error("select sin(c1), count(c5) from stb1 ")
- tdSql.error("select sin(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, sin(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, sin(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select sin(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select sin(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
-
- # # bug fix for compute
- tdSql.query("select c1, sin(c1) -0 ,sin(c1-4)-0 from ct4 ")
+
+ # # bug fix for compute
+ tdSql.query(f"select c1, sin(c1) -0 ,sin(c1-4)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -334,7 +322,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 0.989358247)
tdSql.checkData(1, 2, -0.756802495)
- tdSql.query(" select c1, sin(c1) -0 ,sin(c1-0.1)-0.1 from ct4")
+ tdSql.query(f"select c1, sin(c1) -0 ,sin(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -342,35 +330,34 @@ class TDTestCase:
tdSql.checkData(1, 1, 0.989358247)
tdSql.checkData(1, 2, 0.898941342)
- tdSql.query("select c1, sin(c1), c2, sin(c2), c3, sin(c3) from ct1")
+ tdSql.query(f"select c1, sin(c1), c2, sin(c2), c3, sin(c3) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, sin(100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.sin(100000000))
- tdSql.query("select c1, sin(10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.sin(10000000000000))
- tdSql.query("select c1, sin(10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sin(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sin(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(1, 1, math.sin(10000000000000000000000000.0))
- tdSql.query("select c1, sin(10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sin(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.sin(10000000000000000000000000000000000.0))
- tdSql.query("select c1, sin(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sin(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.sin(10000000000000000000000000000000000000000.0))
- tdSql.query("select c1, sin(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -378,7 +365,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,1.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -386,7 +373,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,-1.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1=sin(c1) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1=sin(c1) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,0)
@@ -394,45 +381,40 @@ class TDTestCase:
tdSql.checkData(0,3,0.000000000)
tdSql.checkData(0,4,-0.100000000)
tdSql.checkData(0,5,0.000000000)
-
- def pow_Arithmetic(self):
- pass
-
- def check_boundary_values(self):
+
+ def check_boundary_values(self, dbname="testdb"):
PI=3.1415926
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
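+ # the four rows below sit at, or one inside, the INT/BIGINT/SMALLINT/TINYINT/FLOAT/DOUBLE limits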
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
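+ # values one step past each integer limit must be rejected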
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_sin( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from sub1_bound")
-
- self.check_result_auto_sin( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select sin(c1), sin(c2) ,sin(c3), sin(c3), sin(c2) ,sin(c1) from sub1_bound")
+ self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)) from {dbname}.sub1_bound")
+
+ self.check_result_auto_sin( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select sin(c1), sin(c2) ,sin(c3), sin(c3), sin(c2) ,sin(c1) from {dbname}.sub1_bound")
+
+ self.check_result_auto_sin(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select sin(abs(c1)) from {dbname}.sub1_bound" )
- self.check_result_auto_sin("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select sin(abs(c1)) from sub1_bound" )
-
# check basic elem for table per row
- tdSql.query("select sin(abs(c1)) ,sin(abs(c2)) , sin(abs(c3)) , sin(abs(c4)), sin(abs(c5)), sin(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select sin(abs(c1)) ,sin(abs(c2)) , sin(abs(c3)) , sin(abs(c4)), sin(abs(c5)), sin(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.sin(2147483647))
tdSql.checkData(0,1,math.sin(9223372036854775807))
tdSql.checkData(0,2,math.sin(32767))
@@ -450,83 +432,79 @@ class TDTestCase:
tdSql.checkData(3,4,math.sin(339999995214436424907732413799364296704.00000))
# check + - * / in functions
- tdSql.query("select sin(abs(c1+1)) ,sin(abs(c2)) , sin(abs(c3*1)) , sin(abs(c4/2)), sin(abs(c5))/2, sin(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select sin(abs(c1+1)) ,sin(abs(c2)) , sin(abs(c3*1)) , sin(abs(c4/2)), sin(abs(c5))/2, sin(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.sin(2147483648.000000000))
tdSql.checkData(0,1,math.sin(9223372036854775807))
tdSql.checkData(0,2,math.sin(32767.000000000))
tdSql.checkData(0,3,math.sin(63.500000000))
- tdSql.execute("create stable st (ts timestamp, num1 float, num2 double) tags (t1 int);")
- tdSql.execute(f'create table tb1 using st tags (1)')
- tdSql.execute(f'create table tb2 using st tags (2)')
- tdSql.execute(f'create table tb3 using st tags (3)')
- tdSql.execute('insert into tb1 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb1 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb1 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb1 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb1 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
+ tdSql.execute(f"create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);")
+ tdSql.execute(f'create table {dbname}.tb1 using {dbname}.st tags (1)')
+ tdSql.execute(f'create table {dbname}.tb2 using {dbname}.st tags (2)')
+ tdSql.execute(f'create table {dbname}.tb3 using {dbname}.st tags (3)')
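+ # tb1 and tb2 hold multiples of PI/2, so sin() passes through its 1, 0 and -1 turning points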
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now(), {PI*2.5}, {PI*2.5})')
- tdSql.execute('insert into tb2 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb2 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb2 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb2 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb2 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now(), {PI*2.5}, {PI*2.5})')
- for i in range(100):
- tdSql.execute('insert into tb3 values (now()+{}s, {}, {})'.format(i,PI*(5+i)/2 ,PI*(5+i)/2))
+ for i in range(100):
+ tdSql.execute(f'insert into {dbname}.tb3 values (now()+{i}s, {PI*(5+i)/2}, {PI*(5+i)/2})')
- self.check_result_auto_sin("select num1,num2 from tb3;" , "select sin(num1),sin(num2) from tb3")
+ self.check_result_auto_sin(f"select num1,num2 from {dbname}.tb3;" , f"select sin(num1),sin(num2) from {dbname}.tb3")
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto_sin( " select c5 from stb1 order by ts " , "select sin(c5) from stb1 order by ts" )
- self.check_result_auto_sin( " select c5 from stb1 order by tbname " , "select sin(c5) from stb1 order by tbname" )
- self.check_result_auto_sin( " select c5 from stb1 where c1 > 0 order by tbname " , "select sin(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_sin( " select c5 from stb1 where c1 > 0 order by tbname " , "select sin(c5) from stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto_sin( f"select c5 from {dbname}.stb1 order by ts " , f"select sin(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_sin( f"select c5 from {dbname}.stb1 order by tbname " , f"select sin(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_sin( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_sin( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_sin( " select t1,c5 from stb1 order by ts " , "select sin(t1), sin(c5) from stb1 order by ts" )
- self.check_result_auto_sin( " select t1,c5 from stb1 order by tbname " , "select sin(t1) ,sin(c5) from stb1 order by tbname" )
- self.check_result_auto_sin( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sin(t1) ,sin(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_sin( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sin(t1) , sin(c5) from stb1 where c1 > 0 order by tbname" )
- pass
-
+ self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select sin(t1), sin(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select sin(t1) ,sin(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(t1) ,sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(t1) , sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
-
+
self.prepare_datas()
- tdLog.printNoPrefix("==========step2:test errors ==============")
+ tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
-
- tdLog.printNoPrefix("==========step3:support types ============")
+
+ tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
- tdLog.printNoPrefix("==========step4: sin basic query ============")
+ tdLog.printNoPrefix("==========step4: sin basic query ============")
self.basic_sin_function()
- tdLog.printNoPrefix("==========step5: big number sin query ============")
-
- self.test_big_number()
-
-
- tdLog.printNoPrefix("==========step6: sin boundary query ============")
-
- self.check_boundary_values()
-
- tdLog.printNoPrefix("==========step7: sin filter query ============")
+ tdLog.printNoPrefix("==========step5: sin filter query ============")
self.abs_func_filter()
+ tdLog.printNoPrefix("==========step6: big number sin query ============")
+
+ self.test_big_number()
+
+
+ tdLog.printNoPrefix("==========step7: sin boundary query ============")
+
+ self.check_boundary_values()
+
+
tdLog.printNoPrefix("==========step8: check sin result of stable query ============")
self.support_super_table_test()
-
+
def stop(self):
tdSql.close()
diff --git a/tests/system-test/2-query/smaTest.py b/tests/system-test/2-query/smaTest.py
index 67824cc3a3..0217b6c28c 100644
--- a/tests/system-test/2-query/smaTest.py
+++ b/tests/system-test/2-query/smaTest.py
@@ -30,14 +30,6 @@ class TDTestCase:
# updatecfgDict = {'debugFlag': 135}
# updatecfgDict = {'fqdn': 135}
- def caseDescription(self):
- '''
- limit and offset keyword function test cases;
- case1: limit offset base function test
- case2: offset return valid
- '''
- return
-
# init
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
@@ -47,11 +39,12 @@ class TDTestCase:
self.ts = 1500000000000
- # run case
+ # run case
def run(self):
# insert data
- self.insert_data1("t1", self.ts, 1000*10000)
- self.insert_data1("t4", self.ts, 1000*10000)
+ dbname = "db"
+ self.insert_data1(f"{dbname}.t1", self.ts, 1000*10000)
+ self.insert_data1(f"{dbname}.t4", self.ts, 1000*10000)
# test base case
# self.test_case1()
tdLog.debug(" LIMIT test_case1 ............ [OK]")
@@ -60,7 +53,7 @@ class TDTestCase:
tdLog.debug(" LIMIT test_case2 ............ [OK]")
- # stop
+ # stop
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
@@ -70,16 +63,16 @@ class TDTestCase:
#
# create table
- def create_tables(self):
+ def create_tables(self, dbname="db"):
# super table
- tdSql.execute("create table st(ts timestamp, i1 int,i2 int) tags(area int)");
+ tdSql.execute(f"create table {dbname}.st(ts timestamp, i1 int,i2 int) tags(area int)")
# child table
- tdSql.execute("create table t1 using st tags(1)");
+ tdSql.execute(f"create table {dbname}.t1 using {dbname}.st tags(1)")
- tdSql.execute("create table st1(ts timestamp, i1 int ,i2 int) tags(area int) sma(i2) ");
- tdSql.execute("create table t4 using st1 tags(1)");
+ tdSql.execute(f"create table {dbname}.st1(ts timestamp, i1 int ,i2 int) tags(area int) sma(i2) ")
+ tdSql.execute(f"create table {dbname}.t4 using {dbname}.st1 tags(1)")
- return
+ return
# insert data1
def insert_data(self, tbname, ts_start, count):
@@ -91,7 +84,7 @@ class TDTestCase:
if i >0 and i%30000 == 0:
tdSql.execute(sql)
sql = pre_insert
- # end sql
+ # end sql
if sql != pre_insert:
tdSql.execute(sql)
@@ -107,16 +100,16 @@ class TDTestCase:
if i >0 and i%30000 == 0:
tdSql.execute(sql)
sql = pre_insert
- # end sql
+ # end sql
if sql != pre_insert:
tdSql.execute(sql)
tdLog.debug("INSERT TABLE DATA ............ [OK]")
return
- # test case1 base
+ # test case1 base
# def test_case1(self):
- # #
+ # #
# # limit base function
# #
# # base no where
diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py
index 5d74c568ce..b7e167c8b5 100644
--- a/tests/system-test/2-query/sml.py
+++ b/tests/system-test/2-query/sml.py
@@ -20,7 +20,7 @@ class TDTestCase:
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
- def checkFileContent(self):
+ def checkFileContent(self, dbname="sml_db"):
buildPath = tdCom.getBuildPath()
cmdStr = '%s/build/bin/sml_test'%(buildPath)
tdLog.info(cmdStr)
@@ -28,8 +28,8 @@ class TDTestCase:
if ret != 0:
tdLog.exit("sml_test failed")
- tdSql.execute('use sml_db')
- tdSql.query("select * from t_b7d815c9222ca64cdf2614c61de8f211")
+ # tdSql.execute('use sml_db')
+ tdSql.query(f"select * from {dbname}.t_b7d815c9222ca64cdf2614c61de8f211")
tdSql.checkRows(1)
tdSql.checkData(0, 0, '2016-01-01 08:00:07.000')
@@ -44,35 +44,35 @@ class TDTestCase:
tdSql.checkData(0, 9, 0)
tdSql.checkData(0, 10, 25)
- tdSql.query("select * from readings")
+ tdSql.query(f"select * from {dbname}.readings")
tdSql.checkRows(9)
- tdSql.query("select distinct tbname from readings")
+ tdSql.query(f"select distinct tbname from {dbname}.readings")
tdSql.checkRows(4)
- tdSql.query("select * from t_0799064f5487946e5d22164a822acfc8 order by _ts")
+ tdSql.query(f"select * from {dbname}.t_0799064f5487946e5d22164a822acfc8 order by _ts")
tdSql.checkRows(2)
tdSql.checkData(0, 3, "kk")
tdSql.checkData(1, 3, None)
- tdSql.query("select distinct tbname from `sys.if.bytes.out`")
+ tdSql.query(f"select distinct tbname from {dbname}.`sys.if.bytes.out`")
tdSql.checkRows(2)
- tdSql.query("select * from t_fc70dec6677d4277c5d9799c4da806da order by _ts")
+ tdSql.query(f"select * from {dbname}.t_fc70dec6677d4277c5d9799c4da806da order by _ts")
tdSql.checkRows(2)
tdSql.checkData(0, 1, 1.300000000)
tdSql.checkData(1, 1,13.000000000)
- tdSql.query("select * from `sys.procs.running`")
+ tdSql.query(f"select * from {dbname}.`sys.procs.running`")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 42.000000000)
tdSql.checkData(0, 2, "web01")
- tdSql.query("select distinct tbname from `sys.cpu.nice`")
+ tdSql.query(f"select distinct tbname from {dbname}.`sys.cpu.nice`")
tdSql.checkRows(2)
- tdSql.query("select * from `sys.cpu.nice` order by _ts")
+ tdSql.query(f"select * from {dbname}.`sys.cpu.nice` order by _ts")
tdSql.checkRows(2)
tdSql.checkData(0, 1, 9.000000000)
tdSql.checkData(0, 2, "lga")
@@ -83,10 +83,10 @@ class TDTestCase:
tdSql.checkData(1, 3, "web01")
tdSql.checkData(1, 4, "t1")
- tdSql.query("select * from macylr")
+ tdSql.query(f"select * from {dbname}.macylr")
tdSql.checkRows(2)
- tdSql.query("desc macylr")
+ tdSql.query(f"desc {dbname}.macylr")
tdSql.checkRows(25)
return
diff --git a/tests/system-test/2-query/spread.py b/tests/system-test/2-query/spread.py
index 51c569e565..ffe86ff363 100644
--- a/tests/system-test/2-query/spread.py
+++ b/tests/system-test/2-query/spread.py
@@ -26,6 +26,8 @@ TS_TYPE_COL = [ TS_COL, ]
ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ]
+DBNAME = "db"
+
class TDTestCase:
def init(self, conn, logSql):
@@ -88,6 +90,7 @@ class TDTestCase:
return join_condition
def __where_condition(self, col=None, tbname=None, query_conditon=None):
+ # tbname = tbname.split(".")[-1] if tbname else None
if query_conditon and isinstance(query_conditon, str):
if query_conditon.startswith("count"):
query_conditon = query_conditon[6:-1]
@@ -129,32 +132,33 @@ class TDTestCase:
return f"select spread({select_clause}) from {from_clause} {where_condition} {group_condition}"
@property
- def __tb_list(self):
+ def __tb_list(self, dbname=DBNAME):
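+ # note: read via @property, so the dbname parameter always keeps its default value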
return [
- "ct1",
- "ct4",
- "t1",
- "ct2",
- "stb1",
+ f"{dbname}.ct1",
+ f"{dbname}.ct4",
+ f"{dbname}.t1",
+ f"{dbname}.ct2",
+ f"{dbname}.stb1",
]
def sql_list(self):
sqls = []
__no_join_tblist = self.__tb_list
for tb in __no_join_tblist:
- select_claus_list = self.__query_condition(tb)
- for select_claus in select_claus_list:
- group_claus = self.__group_condition(col=select_claus)
- where_claus = self.__where_condition(query_conditon=select_claus)
- having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
- sqls.extend(
- (
- self.__single_sql(select_claus, tb, where_claus, having_claus),
- self.__single_sql(select_claus, tb,),
- self.__single_sql(select_claus, tb, where_condition=where_claus),
- self.__single_sql(select_claus, tb, group_condition=group_claus),
- )
+ tbname = tb.split(".")[-1]
+ select_claus_list = self.__query_condition(tbname)
+ for select_claus in select_claus_list:
+ group_claus = self.__group_condition(col=select_claus)
+ where_claus = self.__where_condition(query_conditon=select_claus)
+ having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
+ sqls.extend(
+ (
+ self.__single_sql(select_claus, tb, where_claus, having_claus),
+ self.__single_sql(select_claus, tb,),
+ self.__single_sql(select_claus, tb, where_condition=where_claus),
+ self.__single_sql(select_claus, tb, group_condition=group_claus),
)
+ )
# return filter(None, sqls)
return list(filter(None, sqls))
@@ -166,28 +170,28 @@ class TDTestCase:
tdLog.info(f"sql: {sqls[i]}")
tdSql.query(sqls[i])
- def __test_current(self):
- tdSql.query("select spread(ts) from ct1")
+ def __test_current(self, dbname=DBNAME):
+ tdSql.query(f"select spread(ts) from {dbname}.ct1")
tdSql.checkRows(1)
- tdSql.query("select spread(c1) from ct2")
+ tdSql.query(f"select spread(c1) from {dbname}.ct2")
tdSql.checkRows(1)
- tdSql.query("select spread(c1) from ct4 group by c1")
+ tdSql.query(f"select spread(c1) from {dbname}.ct4 group by c1")
tdSql.checkRows(self.rows + 3)
- tdSql.query("select spread(c1) from ct4 group by c7")
+ tdSql.query(f"select spread(c1) from {dbname}.ct4 group by c7")
tdSql.checkRows(3)
- tdSql.query("select spread(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts")
+ tdSql.query(f"select spread(ct2.c1) from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.checkRows(1)
self.spread_check()
- def __test_error(self):
+ def __test_error(self, dbname=DBNAME):
tdLog.printNoPrefix("===step 0: err case, must return err")
- tdSql.error( "select spread() from ct1" )
- tdSql.error( "select spread(1, 2) from ct2" )
- tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from ct4" )
- tdSql.error( f"select spread({BOOLEAN_COL[0]}) from t1" )
- tdSql.error( f"select spread({CHAR_COL[0]}) from stb1" )
+ tdSql.error( f"select spread() from {dbname}.ct1" )
+ tdSql.error( f"select spread(1, 2) from {dbname}.ct2" )
+ tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from {dbname}.ct4" )
+ tdSql.error( f"select spread({BOOLEAN_COL[0]}) from {dbname}.t1" )
+ tdSql.error( f"select spread({CHAR_COL[0]}) from {dbname}.stb1" )
# tdSql.error( ''' select spread(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'])
# from ct1
@@ -196,20 +200,20 @@ class TDTestCase:
# having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' )
# tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ")
- def all_test(self):
- self.__test_error()
- self.__test_current()
+ def all_test(self, dbname=DBNAME):
+ self.__test_error(dbname)
+ self.__test_current(dbname)
- def __create_tb(self):
+ def __create_tb(self, dbname=DBNAME):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -219,30 +223,30 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname=DBNAME):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -258,7 +262,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -274,13 +278,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
diff --git a/tests/system-test/2-query/sqrt.py b/tests/system-test/2-query/sqrt.py
index 425d59f118..9597375885 100644
--- a/tests/system-test/2-query/sqrt.py
+++ b/tests/system-test/2-query/sqrt.py
@@ -9,48 +9,46 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -85,84 +83,74 @@ class TDTestCase:
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
-
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("sqrt function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("sqrt value check pass , it work as expected ,sql is \"%s\" "%pow_query )
+ tdSql.checkData(row_index ,col_index ,auto_result[row_index][col_index])
- def test_errors(self):
+
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select sqrt from t1",
- # "select sqrt(-+--+c1 ) from t1",
- # "select +-sqrt(c1) from t1",
- # "select ++-sqrt(c1) from t1",
- # "select ++--sqrt(c1) from t1",
- # "select - -sqrt(c1)*0 from t1",
- # "select sqrt(tbname+1) from t1 ",
- "select sqrt(123--123)==1 from t1",
- "select sqrt(c1) as 'd1' from t1",
- "select sqrt(c1 ,c2) from t1",
- "select sqrt(c1 ,NULL ) from t1",
- "select sqrt(,) from t1;",
- "select sqrt(sqrt(c1) ab from t1)",
- "select sqrt(c1 ) as int from t1",
- "select sqrt from stb1",
- # "select sqrt(-+--+c1) from stb1",
- # "select +-sqrt(c1) from stb1",
- # "select ++-sqrt(c1) from stb1",
- # "select ++--sqrt(c1) from stb1",
- # "select - -sqrt(c1)*0 from stb1",
- # "select sqrt(tbname+1) from stb1 ",
- "select sqrt(123--123)==1 from stb1",
- "select sqrt(c1) as 'd1' from stb1",
- "select sqrt(c1 ,c2 ) from stb1",
- "select sqrt(c1 ,NULL) from stb1",
- "select sqrt(,) from stb1;",
- "select sqrt(sqrt(c1) ab from stb1)",
- "select sqrt(c1) as int from stb1"
+ f"select sqrt from {dbname}.t1",
+ # f"select sqrt(-+--+c1 ) from {dbname}.t1",
+ # f"select +-sqrt(c1) from {dbname}.t1",
+ # f"select ++-sqrt(c1) from {dbname}.t1",
+ # f"select ++--sqrt(c1) from {dbname}.t1",
+ # f"select - -sqrt(c1)*0 from {dbname}.t1",
+ # f"select sqrt(tbname+1) from {dbname}.t1 ",
+ f"select sqrt(123--123)==1 from {dbname}.t1",
+ f"select sqrt(c1) as 'd1' from {dbname}.t1",
+ f"select sqrt(c1 ,c2) from {dbname}.t1",
+ f"select sqrt(c1 ,NULL ) from {dbname}.t1",
+ f"select sqrt(,) from {dbname}.t1;",
+ f"select sqrt(sqrt(c1) ab from {dbname}.t1)",
+ f"select sqrt(c1 ) as int from {dbname}.t1",
+ f"select sqrt from {dbname}.stb1",
+ # f"select sqrt(-+--+c1) from {dbname}.stb1",
+ # f"select +-sqrt(c1) from {dbname}.stb1",
+ # f"select ++-sqrt(c1) from {dbname}.stb1",
+ # f"select ++--sqrt(c1) from {dbname}.stb1",
+ # f"select - -sqrt(c1)*0 from {dbname}.stb1",
+ # f"select sqrt(tbname+1) from {dbname}.stb1 ",
+ f"select sqrt(123--123)==1 from {dbname}.stb1",
+ f"select sqrt(c1) as 'd1' from {dbname}.stb1",
+ f"select sqrt(c1 ,c2 ) from {dbname}.stb1",
+ f"select sqrt(c1 ,NULL) from {dbname}.stb1",
+ f"select sqrt(,) from {dbname}.stb1;",
+ f"select sqrt(sqrt(c1) ab from {dbname}.stb1)",
+ f"select sqrt(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select sqrt(ts) from t1" ,
- "select sqrt(c7) from t1",
- "select sqrt(c8) from t1",
- "select sqrt(c9) from t1",
- "select sqrt(ts) from ct1" ,
- "select sqrt(c7) from ct1",
- "select sqrt(c8) from ct1",
- "select sqrt(c9) from ct1",
- "select sqrt(ts) from ct3" ,
- "select sqrt(c7) from ct3",
- "select sqrt(c8) from ct3",
- "select sqrt(c9) from ct3",
- "select sqrt(ts) from ct4" ,
- "select sqrt(c7) from ct4",
- "select sqrt(c8) from ct4",
- "select sqrt(c9) from ct4",
- "select sqrt(ts) from stb1" ,
- "select sqrt(c7) from stb1",
- "select sqrt(c8) from stb1",
- "select sqrt(c9) from stb1" ,
+ f"select sqrt(ts) from {dbname}.t1" ,
+ f"select sqrt(c7) from {dbname}.t1",
+ f"select sqrt(c8) from {dbname}.t1",
+ f"select sqrt(c9) from {dbname}.t1",
+ f"select sqrt(ts) from {dbname}.ct1" ,
+ f"select sqrt(c7) from {dbname}.ct1",
+ f"select sqrt(c8) from {dbname}.ct1",
+ f"select sqrt(c9) from {dbname}.ct1",
+ f"select sqrt(ts) from {dbname}.ct3" ,
+ f"select sqrt(c7) from {dbname}.ct3",
+ f"select sqrt(c8) from {dbname}.ct3",
+ f"select sqrt(c9) from {dbname}.ct3",
+ f"select sqrt(ts) from {dbname}.ct4" ,
+ f"select sqrt(c7) from {dbname}.ct4",
+ f"select sqrt(c8) from {dbname}.ct4",
+ f"select sqrt(c9) from {dbname}.ct4",
+ f"select sqrt(ts) from {dbname}.stb1" ,
+ f"select sqrt(c7) from {dbname}.stb1",
+ f"select sqrt(c8) from {dbname}.stb1",
+ f"select sqrt(c9) from {dbname}.stb1" ,
- "select sqrt(ts) from stbbb1" ,
- "select sqrt(c7) from stbbb1",
+ f"select sqrt(ts) from {dbname}.stbbb1" ,
+ f"select sqrt(c7) from {dbname}.stbbb1",
- "select sqrt(ts) from tbname",
- "select sqrt(c9) from tbname"
+ f"select sqrt(ts) from {dbname}.tbname",
+ f"select sqrt(c9) from {dbname}.tbname"
]
@@ -171,103 +159,103 @@ class TDTestCase:
type_sql_lists = [
- "select sqrt(c1) from t1",
- "select sqrt(c2) from t1",
- "select sqrt(c3) from t1",
- "select sqrt(c4) from t1",
- "select sqrt(c5) from t1",
- "select sqrt(c6) from t1",
+ f"select sqrt(c1) from {dbname}.t1",
+ f"select sqrt(c2) from {dbname}.t1",
+ f"select sqrt(c3) from {dbname}.t1",
+ f"select sqrt(c4) from {dbname}.t1",
+ f"select sqrt(c5) from {dbname}.t1",
+ f"select sqrt(c6) from {dbname}.t1",
- "select sqrt(c1) from ct1",
- "select sqrt(c2) from ct1",
- "select sqrt(c3) from ct1",
- "select sqrt(c4) from ct1",
- "select sqrt(c5) from ct1",
- "select sqrt(c6) from ct1",
+ f"select sqrt(c1) from {dbname}.ct1",
+ f"select sqrt(c2) from {dbname}.ct1",
+ f"select sqrt(c3) from {dbname}.ct1",
+ f"select sqrt(c4) from {dbname}.ct1",
+ f"select sqrt(c5) from {dbname}.ct1",
+ f"select sqrt(c6) from {dbname}.ct1",
- "select sqrt(c1) from ct3",
- "select sqrt(c2) from ct3",
- "select sqrt(c3) from ct3",
- "select sqrt(c4) from ct3",
- "select sqrt(c5) from ct3",
- "select sqrt(c6) from ct3",
+ f"select sqrt(c1) from {dbname}.ct3",
+ f"select sqrt(c2) from {dbname}.ct3",
+ f"select sqrt(c3) from {dbname}.ct3",
+ f"select sqrt(c4) from {dbname}.ct3",
+ f"select sqrt(c5) from {dbname}.ct3",
+ f"select sqrt(c6) from {dbname}.ct3",
- "select sqrt(c1) from stb1",
- "select sqrt(c2) from stb1",
- "select sqrt(c3) from stb1",
- "select sqrt(c4) from stb1",
- "select sqrt(c5) from stb1",
- "select sqrt(c6) from stb1",
+ f"select sqrt(c1) from {dbname}.stb1",
+ f"select sqrt(c2) from {dbname}.stb1",
+ f"select sqrt(c3) from {dbname}.stb1",
+ f"select sqrt(c4) from {dbname}.stb1",
+ f"select sqrt(c5) from {dbname}.stb1",
+ f"select sqrt(c6) from {dbname}.stb1",
- "select sqrt(c6) as alisb from stb1",
- "select sqrt(c6) alisb from stb1",
+ f"select sqrt(c6) as alisb from {dbname}.stb1",
+ f"select sqrt(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def basic_sqrt_function(self):
+ def basic_sqrt_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table, ct3 is empty
- tdSql.query("select sqrt(c1) from ct3")
+ tdSql.query(f"select sqrt(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c2) from ct3")
+ tdSql.query(f"select sqrt(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c3) from ct3")
+ tdSql.query(f"select sqrt(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c4) from ct3")
+ tdSql.query(f"select sqrt(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c5) from ct3")
+ tdSql.query(f"select sqrt(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c6) from ct3")
+ tdSql.query(f"select sqrt(c6) from {dbname}.ct3")
tdSql.checkRows(0)
# used for regular table
- tdSql.query("select sqrt(c1) from t1")
+ tdSql.query(f"select sqrt(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1.000000000)
tdSql.checkData(3 , 0, 1.732050808)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from t1")
+ self.check_result_auto_sqrt( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from {dbname}.t1")
# used for sub table
- tdSql.query("select c2 ,sqrt(c2) from ct1")
+ tdSql.query(f"select c2 ,sqrt(c2) from {dbname}.ct1")
tdSql.checkData(0, 1, 298.140906284)
tdSql.checkData(1 , 1, 278.885281074)
tdSql.checkData(3 , 1, 235.701081881)
tdSql.checkData(4 , 1, 0.000000000)
- tdSql.query("select c1, c5 ,sqrt(c5) from ct4")
+ tdSql.query(f"select c1, c5 ,sqrt(c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, 2.979932904)
tdSql.checkData(2 , 2, 2.787471970)
tdSql.checkData(3 , 2, 2.580697551)
tdSql.checkData(5 , 2, None)
- self.check_result_auto_sqrt( "select c1, c2, c3 , c4, c5 from ct1", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c4), sqrt(c5) from ct1")
+ self.check_result_auto_sqrt( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c4), sqrt(c5) from {dbname}.ct1")
# nested query for sqrt functions
- tdSql.query("select c4 , sqrt(c4) ,sqrt(sqrt(c4)) , sqrt(sqrt(sqrt(c4))) from ct1;")
+ tdSql.query(f"select c4 , sqrt(c4) ,sqrt(sqrt(c4)) , sqrt(sqrt(sqrt(c4))) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 88)
tdSql.checkData(0 , 1 , 9.380831520)
tdSql.checkData(0 , 2 , 3.062814314)
@@ -285,22 +273,22 @@ class TDTestCase:
# used for super table (stable)
- tdSql.query("select sqrt(c1) from stb1")
+ tdSql.query(f"select sqrt(c1) from {dbname}.stb1")
tdSql.checkRows(25)
# used for nonexistent tables
- tdSql.error("select sqrt(c1) from stbbb1")
- tdSql.error("select sqrt(c1) from tbname")
- tdSql.error("select sqrt(c1) from ct5")
+ tdSql.error(f"select sqrt(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select sqrt(c1) from {dbname}.tbname")
+ tdSql.error(f"select sqrt(c1) from {dbname}.ct5")
# mix with common col
- tdSql.query("select c1, sqrt(c1) from ct1")
+ tdSql.query(f"select c1, sqrt(c1) from {dbname}.ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,2.828427125)
tdSql.checkData(4 , 0 ,0)
tdSql.checkData(4 , 1 ,0.000000000)
- tdSql.query("select c2, sqrt(c2) from ct4")
+ tdSql.query(f"select c2, sqrt(c2) from {dbname}.ct4")
tdSql.checkData(0 , 0 , None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,55555)
@@ -309,7 +297,7 @@ class TDTestCase:
tdSql.checkData(5 , 1 ,None)
# mix with common functions
- tdSql.query("select c1, sqrt(c1),sqrt(c1), sqrt(sqrt(c1)) from ct4 ")
+ tdSql.query(f"select c1, sqrt(c1),sqrt(c1), sqrt(sqrt(c1)) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
@@ -320,34 +308,34 @@ class TDTestCase:
tdSql.checkData(3 , 2 ,2.449489743)
tdSql.checkData(3 , 3 ,1.565084580)
- tdSql.query("select c1, sqrt(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, sqrt(c1),c5, floor(c5) from {dbname}.stb1 ")
# mix with agg functions, not supported
- tdSql.error("select c1, sqrt(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, sqrt(c1),c5, count(c5) from ct1 ")
- tdSql.error("select sqrt(c1), count(c5) from stb1 ")
- tdSql.error("select sqrt(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, sqrt(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, sqrt(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select sqrt(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select sqrt(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mixed with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
# bug fix for count
- tdSql.query("select count(c1) from ct4 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,9)
- tdSql.query("select count(*) from ct4 ")
+ tdSql.query(f"select count(*) from {dbname}.ct4 ")
tdSql.checkData(0,0,12)
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,22)
- tdSql.query("select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0,0,25)
# bug fix for compute
- tdSql.query("select c1, sqrt(c1) -0 ,sqrt(c1-4)-0 from ct4 ")
+ tdSql.query(f"select c1, sqrt(c1) -0 ,sqrt(c1-4)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -355,7 +343,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 2.828427125)
tdSql.checkData(1, 2, 2.000000000)
- tdSql.query(" select c1, sqrt(c1) -0 ,sqrt(c1-0.1)-0.1 from ct4")
+ tdSql.query(f"select c1, sqrt(c1) -0 ,sqrt(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -363,57 +351,56 @@ class TDTestCase:
tdSql.checkData(1, 1, 2.828427125)
tdSql.checkData(1, 2, 2.710693865)
- tdSql.query("select c1, sqrt(c1), c2, sqrt(c2), c3, sqrt(c3) from ct1")
+ tdSql.query(f"select c1, sqrt(c1), c2, sqrt(c2), c3, sqrt(c3) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, sqrt(100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, 10000.000000000)
- tdSql.query("select c1, sqrt(10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, 3162277.660168380)
- tdSql.query("select c1, sqrt(c1) + sqrt(10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sqrt(c1) + sqrt(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sqrt(c1) + sqrt(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(c1) + sqrt(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(1, 1, 3162277660171.025390625)
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, 100000000000000000.000000000)
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, 100000000000000000000.000000000)
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def pow_base_test(self):
+ def pow_base_test(self, dbname="db"):
# base is a regular number, int or double
- tdSql.query("select c1, sqrt(c1) from ct1")
+ tdSql.query(f"select c1, sqrt(c1) from {dbname}.ct1")
tdSql.checkData(0, 1,2.828427125)
tdSql.checkRows(13)
# bug for compute in functions
- # tdSql.query("select c1, abs(1/0) from ct1")
+ # tdSql.query(f"select c1, abs(1/0) from {dbname}.ct1")
# tdSql.checkData(0, 0, 8)
# tdSql.checkData(0, 1, 1)
- tdSql.query("select c1, sqrt(1) from ct1")
+ tdSql.query(f"select c1, sqrt(1) from {dbname}.ct1")
tdSql.checkData(0, 1, 1.000000000)
tdSql.checkRows(13)
# select two cols along with sqrt
- tdSql.query("select c1,c2, sqrt(c2) from ct1")
+ tdSql.query(f"select c1,c2, sqrt(c2) from {dbname}.ct1")
tdSql.checkData(0, 2, 298.140906284)
tdSql.checkData(1, 2, 278.885281074)
tdSql.checkData(4, 2, 0.000000000)
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -421,7 +408,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,3.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -429,7 +416,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -437,7 +424,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=sqrt(c1) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=sqrt(c1) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,11111)
@@ -446,42 +433,37 @@ class TDTestCase:
tdSql.checkData(0,4,0.900000000)
tdSql.checkData(0,5,1.000000000)
- def pow_Arithmetic(self):
- pass
+ def check_boundary_values(self, dbname="bound_test"):
- def check_boundary_values(self):
-
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from sub1_bound")
+ self.check_result_auto_sqrt( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from {dbname}.sub1_bound")
- self.check_result_auto_sqrt( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c3), sqrt(c2) ,sqrt(c1) from sub1_bound")
+ self.check_result_auto_sqrt( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c3), sqrt(c2) ,sqrt(c1) from {dbname}.sub1_bound")
- self.check_result_auto_sqrt("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select sqrt(abs(c1)) from sub1_bound" )
+ self.check_result_auto_sqrt(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select sqrt(abs(c1)) from {dbname}.sub1_bound" )
# check basic elements per row of the table
- tdSql.query("select sqrt(abs(c1)) ,sqrt(abs(c2)) , sqrt(abs(c3)) , sqrt(abs(c4)), sqrt(abs(c5)), sqrt(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select sqrt(abs(c1)) ,sqrt(abs(c2)) , sqrt(abs(c3)) , sqrt(abs(c4)), sqrt(abs(c5)), sqrt(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.sqrt(2147483647))
tdSql.checkData(0,1,math.sqrt(9223372036854775807))
tdSql.checkData(0,2,math.sqrt(32767))
@@ -499,23 +481,22 @@ class TDTestCase:
tdSql.checkData(3,4,math.sqrt(339999995214436424907732413799364296704.00000))
# check + - * / in functions
- tdSql.query("select sqrt(abs(c1+1)) ,sqrt(abs(c2)) , sqrt(abs(c3*1)) , sqrt(abs(c4/2)), sqrt(abs(c5))/2, sqrt(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select sqrt(abs(c1+1)) ,sqrt(abs(c2)) , sqrt(abs(c3*1)) , sqrt(abs(c4/2)), sqrt(abs(c5))/2, sqrt(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.sqrt(2147483648.000000000))
tdSql.checkData(0,1,math.sqrt(9223372036854775807))
tdSql.checkData(0,2,math.sqrt(32767.000000000))
tdSql.checkData(0,3,math.sqrt(63.500000000))
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto_sqrt( " select c5 from stb1 order by ts " , "select sqrt(c5) from stb1 order by ts" )
- self.check_result_auto_sqrt( " select c5 from stb1 order by tbname " , "select sqrt(c5) from stb1 order by tbname" )
- self.check_result_auto_sqrt( " select c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_sqrt( " select c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(c5) from stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 order by ts " , f"select sqrt(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_sqrt( " select t1,c5 from stb1 order by ts " , "select sqrt(t1), sqrt(c5) from stb1 order by ts" )
- self.check_result_auto_sqrt( " select t1,c5 from stb1 order by tbname " , "select sqrt(t1) ,sqrt(c5) from stb1 order by tbname" )
- self.check_result_auto_sqrt( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(t1) ,sqrt(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_sqrt( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(t1) , sqrt(c5) from stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select sqrt(t1), sqrt(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select sqrt(t1) ,sqrt(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(t1) ,sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(t1) , sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
pass
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
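Every hunk in this sqrt.py diff applies one refactor: each helper gains a `dbname="db"` parameter and every table reference is fully qualified through an f-string, so the tests no longer depend on a prior `use db`. A minimal, self-contained sketch of that qualification step (the `qualify` helper is illustrative only, not part of the test framework):

```python
# Illustrative only: mirrors how these tests build db-qualified SQL with f-strings.
def qualify(table: str, dbname: str = "db") -> str:
    """Prefix a bare table name with an explicit database name."""
    return f"select sqrt(c1) from {dbname}.{table}"

print(qualify("ct1"))                       # select sqrt(c1) from db.ct1
print(qualify("sub1_bound", "bound_test"))  # select sqrt(c1) from bound_test.sub1_bound
```

With the database named in every statement, a helper such as `check_boundary_values` can create and query its own database (`bound_test` above) without switching the connection's current database.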
diff --git a/tests/system-test/2-query/statecount.py b/tests/system-test/2-query/statecount.py
index a88c4aef9f..c73c955de4 100644
--- a/tests/system-test/2-query/statecount.py
+++ b/tests/system-test/2-query/statecount.py
@@ -11,50 +11,47 @@ from util.sql import *
from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.ts = 1420041600000 # 2015-01-01 00:00:00, the begin time of the first record
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -70,68 +67,68 @@ class TDTestCase:
'''
)
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- # "select statecount(c1,'GT',5) from t1"
- "select statecount from t1",
- "select statecount(123--123)==1 from t1",
- "select statecount(123,123) from t1",
- "select statecount(c1,ts) from t1",
- "select statecount(c1,c1,ts) from t1",
- "select statecount(c1 ,c2 ) from t1",
- "select statecount(c1 ,NULL) from t1",
- #"select statecount(c1 ,'NULL',1.0) from t1",
- "select statecount(c1 ,'GT','1') from t1",
- "select statecount(c1 ,'GT','tbname') from t1",
- "select statecount(c1 ,'GT','*') from t1",
- "select statecount(c1 ,'GT',ts) from t1",
- "select statecount(c1 ,'GT',max(c1)) from t1",
- # "select statecount(abs(c1) ,'GT',1) from t1",
- # "select statecount(c1+2 ,'GT',1) from t1",
- "select statecount(c1 ,'GT',1,1u) from t1",
- "select statecount(c1 ,'GT',1,now) from t1",
- "select statecount(c1 ,'GT','1') from t1",
- "select statecount(c1 ,'GT','1',True) from t1",
- "select statecount(statecount(c1) ab from t1)",
- "select statecount(c1 ,'GT',1,,)int from t1",
- "select statecount('c1','GT',1) from t1",
- "select statecount('c1','GT' , NULL) from t1",
- "select statecount('c1','GT', 1 , '') from t1",
- "select statecount('c1','GT', 1 ,c%) from t1",
- "select statecount(c1 ,'GT',1,t1) from t1",
- "select statecount(c1 ,'GT',1,True) from t1",
- "select statecount(c1 ,'GT',1) , count(c1) from t1",
- "select statecount(c1 ,'GT',1) , avg(c1) from t1",
- "select statecount(c1 ,'GT',1) , min(c1) from t1",
- "select statecount(c1 ,'GT',1) , spread(c1) from t1",
- "select statecount(c1 ,'GT',1) , diff(c1) from t1",
+ # f"select statecount(c1,'GT',5) from {dbname}.t1"
+ f"select statecount from {dbname}.t1",
+ f"select statecount(123--123)==1 from {dbname}.t1",
+ f"select statecount(123,123) from {dbname}.t1",
+ f"select statecount(c1,ts) from {dbname}.t1",
+ f"select statecount(c1,c1,ts) from {dbname}.t1",
+ f"select statecount(c1 ,c2 ) from {dbname}.t1",
+ f"select statecount(c1 ,NULL) from {dbname}.t1",
+ #f"select statecount(c1 ,'NULL',1.0) from {dbname}.t1",
+ f"select statecount(c1 ,'GT','1') from {dbname}.t1",
+ f"select statecount(c1 ,'GT','tbname') from {dbname}.t1",
+ f"select statecount(c1 ,'GT','*') from {dbname}.t1",
+ f"select statecount(c1 ,'GT',ts) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',max(c1)) from {dbname}.t1",
+ # f"select statecount(abs(c1) ,'GT',1) from {dbname}.t1",
+ # f"select statecount(c1+2 ,'GT',1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1,1u) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1,now) from {dbname}.t1",
+ f"select statecount(c1 ,'GT','1') from {dbname}.t1",
+ f"select statecount(c1 ,'GT','1',True) from {dbname}.t1",
+ f"select statecount(statecount(c1) ab from {dbname}.t1)",
+ f"select statecount(c1 ,'GT',1,,)int from {dbname}.t1",
+ f"select statecount('c1','GT',1) from {dbname}.t1",
+ f"select statecount('c1','GT' , NULL) from {dbname}.t1",
+ f"select statecount('c1','GT', 1 , '') from {dbname}.t1",
+ f"select statecount('c1','GT', 1 ,c%) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1,t1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1,True) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , count(c1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , avg(c1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , min(c1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , spread(c1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , diff(c1) from {dbname}.t1",
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
pass
- def support_types(self):
+ def support_types(self, dbname="db"):
other_no_value_types = [
- "select statecount(ts,'GT',1) from t1" ,
- "select statecount(c7,'GT',1) from t1",
- "select statecount(c8,'GT',1) from t1",
- "select statecount(c9,'GT',1) from t1",
- "select statecount(ts,'GT',1) from ct1" ,
- "select statecount(c7,'GT',1) from ct1",
- "select statecount(c8,'GT',1) from ct1",
- "select statecount(c9,'GT',1) from ct1",
- "select statecount(ts,'GT',1) from ct3" ,
- "select statecount(c7,'GT',1) from ct3",
- "select statecount(c8,'GT',1) from ct3",
- "select statecount(c9,'GT',1) from ct3",
- "select statecount(ts,'GT',1) from ct4" ,
- "select statecount(c7,'GT',1) from ct4",
- "select statecount(c8,'GT',1) from ct4",
- "select statecount(c9,'GT',1) from ct4",
- "select statecount(ts,'GT',1) from stb1 partition by tbname" ,
- "select statecount(c7,'GT',1) from stb1 partition by tbname",
- "select statecount(c8,'GT',1) from stb1 partition by tbname",
- "select statecount(c9,'GT',1) from stb1 partition by tbname"
+ f"select statecount(ts,'GT',1) from {dbname}.t1" ,
+ f"select statecount(c7,'GT',1) from {dbname}.t1",
+ f"select statecount(c8,'GT',1) from {dbname}.t1",
+ f"select statecount(c9,'GT',1) from {dbname}.t1",
+ f"select statecount(ts,'GT',1) from {dbname}.ct1" ,
+ f"select statecount(c7,'GT',1) from {dbname}.ct1",
+ f"select statecount(c8,'GT',1) from {dbname}.ct1",
+ f"select statecount(c9,'GT',1) from {dbname}.ct1",
+ f"select statecount(ts,'GT',1) from {dbname}.ct3" ,
+ f"select statecount(c7,'GT',1) from {dbname}.ct3",
+ f"select statecount(c8,'GT',1) from {dbname}.ct3",
+ f"select statecount(c9,'GT',1) from {dbname}.ct3",
+ f"select statecount(ts,'GT',1) from {dbname}.ct4" ,
+ f"select statecount(c7,'GT',1) from {dbname}.ct4",
+ f"select statecount(c8,'GT',1) from {dbname}.ct4",
+ f"select statecount(c9,'GT',1) from {dbname}.ct4",
+ f"select statecount(ts,'GT',1) from {dbname}.stb1 partition by tbname" ,
+ f"select statecount(c7,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c8,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c9,'GT',1) from {dbname}.stb1 partition by tbname"
]
for type_sql in other_no_value_types:
@@ -139,224 +136,222 @@ class TDTestCase:
tdLog.info("support type ok , sql is : %s"%type_sql)
type_sql_lists = [
- "select statecount(c1,'GT',1) from t1",
- "select statecount(c2,'GT',1) from t1",
- "select statecount(c3,'GT',1) from t1",
- "select statecount(c4,'GT',1) from t1",
- "select statecount(c5,'GT',1) from t1",
- "select statecount(c6,'GT',1) from t1",
+ f"select statecount(c1,'GT',1) from {dbname}.t1",
+ f"select statecount(c2,'GT',1) from {dbname}.t1",
+ f"select statecount(c3,'GT',1) from {dbname}.t1",
+ f"select statecount(c4,'GT',1) from {dbname}.t1",
+ f"select statecount(c5,'GT',1) from {dbname}.t1",
+ f"select statecount(c6,'GT',1) from {dbname}.t1",
- "select statecount(c1,'GT',1) from ct1",
- "select statecount(c2,'GT',1) from ct1",
- "select statecount(c3,'GT',1) from ct1",
- "select statecount(c4,'GT',1) from ct1",
- "select statecount(c5,'GT',1) from ct1",
- "select statecount(c6,'GT',1) from ct1",
+ f"select statecount(c1,'GT',1) from {dbname}.ct1",
+ f"select statecount(c2,'GT',1) from {dbname}.ct1",
+ f"select statecount(c3,'GT',1) from {dbname}.ct1",
+ f"select statecount(c4,'GT',1) from {dbname}.ct1",
+ f"select statecount(c5,'GT',1) from {dbname}.ct1",
+ f"select statecount(c6,'GT',1) from {dbname}.ct1",
- "select statecount(c1,'GT',1) from ct3",
- "select statecount(c2,'GT',1) from ct3",
- "select statecount(c3,'GT',1) from ct3",
- "select statecount(c4,'GT',1) from ct3",
- "select statecount(c5,'GT',1) from ct3",
- "select statecount(c6,'GT',1) from ct3",
+ f"select statecount(c1,'GT',1) from {dbname}.ct3",
+ f"select statecount(c2,'GT',1) from {dbname}.ct3",
+ f"select statecount(c3,'GT',1) from {dbname}.ct3",
+ f"select statecount(c4,'GT',1) from {dbname}.ct3",
+ f"select statecount(c5,'GT',1) from {dbname}.ct3",
+ f"select statecount(c6,'GT',1) from {dbname}.ct3",
- "select statecount(c1,'GT',1) from stb1 partition by tbname",
- "select statecount(c2,'GT',1) from stb1 partition by tbname",
- "select statecount(c3,'GT',1) from stb1 partition by tbname",
- "select statecount(c4,'GT',1) from stb1 partition by tbname",
- "select statecount(c5,'GT',1) from stb1 partition by tbname",
- "select statecount(c6,'GT',1) from stb1 partition by tbname",
+ f"select statecount(c1,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c2,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c3,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c4,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c5,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c6,'GT',1) from {dbname}.stb1 partition by tbname",
- "select statecount(c6,'GT',1) as alisb from stb1 partition by tbname",
- "select statecount(c6,'GT',1) alisb from stb1 partition by tbname",
+ f"select statecount(c6,'GT',1) as alisb from {dbname}.stb1 partition by tbname",
+ f"select statecount(c6,'GT',1) alisb from {dbname}.stb1 partition by tbname",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def support_opers(self):
+ def support_opers(self, dbname="db"):
oper_lists = ['LT','lt','Lt','lT','GT','gt','Gt','gT','LE','le','Le','lE','GE','ge','Ge','gE','NE','ne','Ne','nE','EQ','eq','Eq','eQ']
oper_errors = [",","*","NULL","tbname","ts","sum","_c0"]
for oper in oper_lists:
- tdSql.query(f"select statecount(c1 ,'{oper}',1) as col from t1")
+ tdSql.query(f"select statecount(c1 ,'{oper}',1) as col from {dbname}.t1")
tdSql.checkRows(12)
for oper in oper_errors:
- tdSql.error(f"select statecount(c1 ,'{oper}',1) as col from t1")
+ tdSql.error(f"select statecount(c1 ,'{oper}',1) as col from {dbname}.t1")
-
- def basic_statecount_function(self):
+ def basic_statecount_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table, ct3 is empty
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
# _rowts mixed with statecount will be supported later
- # tdSql.query("select (c6,'GT',1),_rowts from ct3")
+ # tdSql.query(f"select (c6,'GT',1),_rowts from {dbname}.ct3")
# auto check for t1 table
# used for regular table
- tdSql.query("select statecount(c6,'GT',1) from t1")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.t1")
# statecount with super table tags
- tdSql.query("select statecount(c6,'GT',1) from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1) from ct4")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct4")
tdSql.checkRows(12)
- tdSql.query("select statecount(c6,'GT',1),tbname from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1),tbname from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1),t1 from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1),t1 from {dbname}.ct1")
tdSql.checkRows(13)
# statecount with common col
- tdSql.query("select statecount(c6,'GT',1) ,ts from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) ,ts from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, statecount(c6,'GT',1) from ct1")
+ tdSql.query(f"select ts, statecount(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1) ,c1 from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) ,c1 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select c1, statecount(c6,'GT',1) from ct1")
+ tdSql.query(f"select c1, statecount(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, c1, c2, c3, statecount(c6,'GT',1) from ct1")
+ tdSql.query(f"select ts, c1, c2, c3, statecount(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1), ts, c1, c2, c3 from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1), ts, c1, c2, c3 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, c1, c2, c3, statecount(c6,'GT',1), ts, c4, c5, c6 from ct1")
+ tdSql.query(f"select ts, c1, c2, c3, statecount(c6,'GT',1), ts, c4, c5, c6 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select stateduration(c6,'GT',1) ,ts from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1) ,ts from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, stateduration(c6,'GT',1) from ct1")
+ tdSql.query(f"select ts, stateduration(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select stateduration(c6,'GT',1) ,c1 from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1) ,c1 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select c1, stateduration(c6,'GT',1) from ct1")
+ tdSql.query(f"select c1, stateduration(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, c1, c2, c3, stateduration(c6,'GT',1) from ct1")
+ tdSql.query(f"select ts, c1, c2, c3, stateduration(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select stateduration(c6,'GT',1), ts, c1, c2, c3 from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1), ts, c1, c2, c3 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, c1, c2, c3, stateduration(c6,'GT',1), ts, c4, c5, c6 from ct1")
+ tdSql.query(f"select ts, c1, c2, c3, stateduration(c6,'GT',1), ts, c4, c5, c6 from {dbname}.ct1")
tdSql.checkRows(13)
# statecount with scalar function
- tdSql.query("select statecount(c6,'GT',1) , abs(c1) from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) , abs(c1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1) , abs(c2)+2 from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) , abs(c2)+2 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.error("select statecount(c6,'GT',1) , unique(c2) from ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) , unique(c2) from {dbname}.ct1")
- tdSql.query("select stateduration(c6,'GT',1) , abs(c1) from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1) , abs(c1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select stateduration(c6,'GT',1) , abs(c2)+2 from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1) , abs(c2)+2 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.error("select stateduration(c6,'GT',1) , unique(c2) from ct1")
+ tdSql.error(f"select stateduration(c6,'GT',1) , unique(c2) from {dbname}.ct1")
# statecount with aggregate function
- tdSql.error("select statecount(c6,'GT',1) ,sum(c1) from ct1")
- tdSql.error("select statecount(c6,'GT',1) ,max(c1) from ct1")
- tdSql.error("select statecount(c6,'GT',1) ,csum(c1) from ct1")
- tdSql.error("select statecount(c6,'GT',1) ,count(c1) from ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) ,sum(c1) from {dbname}.ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) ,max(c1) from {dbname}.ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) ,csum(c1) from {dbname}.ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) ,count(c1) from {dbname}.ct1")
# statecount with where filter
- tdSql.query("select statecount(c6,'GT',1) from ct4 where c1 is null")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct4 where c1 is null")
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
tdSql.checkData(2, 0, None)
- tdSql.query("select statecount(c1,'GT',1) from t1 where c1 >2 ")
+ tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.t1 where c1 >2 ")
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 2)
tdSql.checkData(2, 0, 3)
tdSql.checkData(4, 0, 5)
tdSql.checkData(5, 0, 6)
- tdSql.query("select statecount(c2,'GT',1) from t1 where c2 between 0 and 99999")
+ tdSql.query(f"select statecount(c2,'GT',1) from {dbname}.t1 where c2 between 0 and 99999")
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 2)
tdSql.checkData(6, 0, -1)
# statecount with union all
- tdSql.query("select statecount(c1,'GT',1) from ct4 union all select statecount(c1,'GT',1) from ct1")
+ tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.ct4 union all select statecount(c1,'GT',1) from {dbname}.ct1")
tdSql.checkRows(25)
- tdSql.query("select statecount(c1,'GT',1) from ct4 union all select distinct(c1) from ct4")
+ tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.ct4 union all select distinct(c1) from {dbname}.ct4")
tdSql.checkRows(22)
# statecount with join
# prepare join data with the same ts
- tdSql.execute(" use db ")
- tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table tb1 using st1 tags(1)")
- tdSql.execute(" create table tb2 using st1 tags(2)")
+ tdSql.execute(f"create stable {dbname}.st1 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(f"create table {dbname}.tb1 using {dbname}.st1 tags(1)")
+ tdSql.execute(f"create table {dbname}.tb2 using {dbname}.st1 tags(2)")
- tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table ttb1 using st2 tags(1)")
- tdSql.execute(" create table ttb2 using st2 tags(2)")
+ tdSql.execute(f"create stable {dbname}.st2 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(f"create table {dbname}.ttb1 using {dbname}.st2 tags(1)")
+ tdSql.execute(f"create table {dbname}.ttb2 using {dbname}.st2 tags(2)")
start_ts = 1622369635000 # 2021-05-30 18:13:55
for i in range(10):
ts_value = start_ts+i*1000
- tdSql.execute(f" insert into tb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into tb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , {i})")
- tdSql.query("select statecount(tb1.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts ")
+ tdSql.query(f"select statecount(tb1.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts ")
tdSql.checkRows(10)
tdSql.checkData(0,0,-1)
tdSql.checkData(1,0,-1)
tdSql.checkData(2,0,1)
tdSql.checkData(9,0,8)
- tdSql.query("select statecount(tb1.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts union all select statecount(tb2.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts ")
+ tdSql.query(f"select statecount(tb1.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts union all select statecount(tb2.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts ")
tdSql.checkRows(20)
# nested query
- # tdSql.query("select unique(c1) from (select c1 from ct1)")
- tdSql.query("select c1 from (select statecount(c1,'GT',1) c1 from t1)")
+ # tdSql.query(f"select unique(c1) from (select c1 from {dbname}.ct1)")
+ tdSql.query(f"select c1 from (select statecount(c1,'GT',1) c1 from {dbname}.t1)")
tdSql.checkRows(12)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, -1)
tdSql.checkData(2, 0, 1)
tdSql.checkData(10, 0, 8)
- tdSql.query("select sum(c1) from (select statecount(c1,'GT',1) c1 from t1)")
+ tdSql.query(f"select sum(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.t1)")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 35)
- tdSql.query("select sum(c1) from (select distinct(c1) c1 from ct1) union all select sum(c1) from (select statecount(c1,'GT',1) c1 from ct1)")
+ tdSql.query(f"select sum(c1) from (select distinct(c1) c1 from {dbname}.ct1) union all select sum(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.ct1)")
tdSql.checkRows(2)
- tdSql.query("select 1-abs(c1) from (select statecount(c1,'GT',1) c1 from t1)")
+ tdSql.query(f"select 1-abs(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.t1)")
tdSql.checkRows(12)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 0.000000000)
@@ -365,43 +360,41 @@ class TDTestCase:
# bug for super table
# partition by tbname
- # tdSql.query(" select unique(c1) from stb1 partition by tbname ")
+ # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ")
# tdSql.checkRows(21)
- # tdSql.query(" select unique(c1) from stb1 partition by tbname ")
+ # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ")
# tdSql.checkRows(21)
# group by
- tdSql.error("select statecount(c1,'GT',1) from ct1 group by c1")
- tdSql.error("select statecount(c1,'GT',1) from ct1 group by tbname")
+ tdSql.error(f"select statecount(c1,'GT',1) from {dbname}.ct1 group by c1")
+ tdSql.error(f"select statecount(c1,'GT',1) from {dbname}.ct1 group by tbname")
- # super table
-
- def check_unit_time(self):
- tdSql.execute(" use db ")
- tdSql.error("select stateduration(c1,'GT',1,1b) from ct1")
- tdSql.error("select stateduration(c1,'GT',1,1u) from ct1")
- tdSql.error("select stateduration(c1,'GT',1,1000s) from t1")
- tdSql.error("select stateduration(c1,'GT',1,10m) from t1")
- tdSql.error("select stateduration(c1,'GT',1,10d) from t1")
- tdSql.query("select stateduration(c1,'GT',1,1s) from t1")
+ def check_unit_time(self, dbname="db"):
+ tdSql.error(f"select stateduration(c1,'GT',1,1b) from {dbname}.ct1")
+ tdSql.error(f"select stateduration(c1,'GT',1,1u) from {dbname}.ct1")
+ tdSql.error(f"select stateduration(c1,'GT',1,1000s) from {dbname}.t1")
+ tdSql.error(f"select stateduration(c1,'GT',1,10m) from {dbname}.t1")
+ tdSql.error(f"select stateduration(c1,'GT',1,10d) from {dbname}.t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1s) from {dbname}.t1")
tdSql.checkData(10,0,63072035)
- tdSql.query("select stateduration(c1,'GT',1,1m) from t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1m) from {dbname}.t1")
tdSql.checkData(10,0,int(63072035/60))
- tdSql.query("select stateduration(c1,'GT',1,1h) from t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1h) from {dbname}.t1")
tdSql.checkData(10,0,int(63072035/60/60))
- tdSql.query("select stateduration(c1,'GT',1,1d) from t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1d) from {dbname}.t1")
tdSql.checkData(10,0,int(63072035/60/24/60))
- tdSql.query("select stateduration(c1,'GT',1,1w) from t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1w) from {dbname}.t1")
tdSql.checkData(10,0,int(63072035/60/7/24/60))
def query_precision(self):
def generate_data(precision="ms"):
- tdSql.execute("create database if not exists db_%s precision '%s';" %(precision, precision))
+ dbname = f"db_{precision}"
+ tdSql.execute(f"create database if not exists db_%s precision '%s';" %(precision, precision))
tdSql.execute("use db_%s;" %precision)
- tdSql.execute("create stable db_%s.st (ts timestamp , id int) tags(ind int);"%precision)
- tdSql.execute("create table db_%s.tb1 using st tags(1);"%precision)
- tdSql.execute("create table db_%s.tb2 using st tags(2);"%precision)
+ tdSql.execute(f"create stable db_%s.st (ts timestamp , id int) tags(ind int);"%precision)
+ tdSql.execute(f"create table db_%s.tb1 using {dbname}.st tags(1);"%precision)
+ tdSql.execute(f"create table db_%s.tb2 using {dbname}.st tags(2);"%precision)
if precision == "ms":
start_ts = self.ts
@@ -432,55 +425,54 @@ class TDTestCase:
if pres == "ms":
if unit in ["1u","1b"]:
- tdSql.error("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+ tdSql.error(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
pass
else:
- tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+ tdSql.query(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
elif pres == "us" and unit in ["1b"]:
if unit in ["1b"]:
- tdSql.error("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+ tdSql.error(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
pass
else:
- tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+ tdSql.query(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
else:
- tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+ tdSql.query(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
basic_result = 70
tdSql.checkData(9,0,basic_result*pow(1000,index))
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- tdSql.query("select statecount(c1,'GT',1) from sub1_bound")
+ tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.sub1_bound")
tdSql.checkRows(5)
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
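The `check_unit_time` hunk above asserts that `stateduration` rescales a raw duration of 63072035 seconds into each requested unit by integer-truncated division. A quick standalone check of that arithmetic, using the values from the assertions:

```python
# Verifies the unit conversions asserted in check_unit_time.
raw_seconds = 63072035
print(int(raw_seconds / 60))                # 1051200 -> expected for unit 1m
print(int(raw_seconds / 60 / 60))           # 17520   -> expected for unit 1h
print(int(raw_seconds / 60 / 24 / 60))      # 730     -> expected for unit 1d
print(int(raw_seconds / 60 / 7 / 24 / 60))  # 104     -> expected for unit 1w
```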
diff --git a/tests/system-test/2-query/substr.py b/tests/system-test/2-query/substr.py
index f833a42b57..ea55c5e44e 100644
--- a/tests/system-test/2-query/substr.py
+++ b/tests/system-test/2-query/substr.py
@@ -127,16 +127,16 @@ class TDTestCase:
return sqls
- def __test_current(self): # sourcery skip: use-itertools-product
+ def __test_current(self, dbname="db"): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__substr_check(tb, CURRENT_POS, LENS)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__substr_err_check(tb):
@@ -145,22 +145,21 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (tag1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -170,29 +169,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -208,7 +207,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -224,13 +223,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -259,10 +258,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
-
- tdSql.execute("use db")
+ tdSql.execute("flush database db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
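Note: the hunk above drops the dnode restart (tdDnodes.stop(1) / tdDnodes.start(1) followed by "use db") in favor of a single "flush database db" statement before the checks run a second time. A minimal sketch of the before/after pattern, assuming the tdSql and tdDnodes fixtures used throughout these tests:

    # old pattern: restart the dnode so data is reloaded from WAL,
    # then re-select the database on the fresh connection
    #   tdDnodes.stop(1)
    #   tdDnodes.start(1)
    #   tdSql.execute("use db")

    # new pattern: force in-memory rows to disk without a restart
    tdSql.execute("flush database db")
    self.all_test()  # rerun the same assertions against flushed data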
diff --git a/tests/system-test/2-query/sum.py b/tests/system-test/2-query/sum.py
index 4f5ed34419..dbc79e25f5 100644
--- a/tests/system-test/2-query/sum.py
+++ b/tests/system-test/2-query/sum.py
@@ -89,14 +89,14 @@ class TDTestCase:
return sqls
- def __test_current(self):
+ def __test_current(self, dbname="db"):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = ["ct1", "ct2", "ct4", "t1"]
for tb in tbname:
self.__sum_current_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = ["ct1", "ct2", "ct4", "t1"]
@@ -106,21 +106,21 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
- def __create_tb(self):
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table {DBNAME}.stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- ) tags (t1 int)
+ ) tags (tag1 int)
'''
- create_ntb_sql = f'''create table {DBNAME}.t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -130,83 +130,82 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table {DBNAME}.ct{i+1} using {DBNAME}.stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into {DBNAME}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into {DBNAME}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into {DBNAME}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into {DBNAME}.ct1 values
- ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } )
- ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } )
+ f'''insert into {dbname}.ct1 values
+ ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
+ ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into {DBNAME}.ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
- { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000}
+ { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
- { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000}
+ { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
- f'''insert into {DBNAME}.ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
- { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
- { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
- insert_data = f'''insert into {DBNAME}.t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
- "binary_{i}", "nchar_{i}", { now_time - 1000 * i } )
+ "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into {DBNAME}.t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
- "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
- "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
-
def run(self):
tdSql.prepare()
@@ -219,12 +218,8 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- # tdDnodes.stop(1)
- # tdDnodes.start(1)
-
tdSql.execute("flush database db")
-
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
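The tail.py changes below follow the same refactoring pattern as sum.py above: the class-level updatecfgDict debug-flag override is removed, helper methods gain a dbname="db" keyword argument, and every table reference becomes a fully qualified {dbname}.<table> f-string, so the cases no longer rely on a prior "use db". A minimal sketch of the convention, with demo_check as a hypothetical helper name:

    # hypothetical helper following the dbname convention in these diffs
    def demo_check(self, dbname="db"):
        # fully qualified names make each statement independent of the
        # connection's current database, so no "use db" is needed first
        tdSql.execute(f"create table {dbname}.demo (ts timestamp, c1 int)")
        tdSql.execute(f"insert into {dbname}.demo values (now(), 1)")
        tdSql.query(f"select c1 from {dbname}.demo")
        tdSql.checkRows(1)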
diff --git a/tests/system-test/2-query/tail.py b/tests/system-test/2-query/tail.py
index d708873d6f..687023f57e 100644
--- a/tests/system-test/2-query/tail.py
+++ b/tests/system-test/2-query/tail.py
@@ -10,49 +10,46 @@ from util.sql import *
from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
-
- def prepare_datas(self):
+
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
-
+
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -67,115 +64,115 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
-
- def test_errors(self):
+
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select tail from t1",
- "select tail(123--123)==1 from t1",
- "select tail(123,123) from t1",
- "select tail(c1,ts) from t1",
- "select tail(c1,c1,ts) from t1",
- "select tail(c1) as 'd1' from t1",
- "select tail(c1 ,c2 ) from t1",
- "select tail(c1 ,NULL) from t1",
- "select tail(,) from t1;",
- "select tail(tail(c1) ab from t1)",
- "select tail(c1) as int from t1",
- "select tail('c1') from t1",
- "select tail(NULL) from t1",
- "select tail('') from t1",
- "select tail(c%) from t1",
- "select tail(t1) from t1",
- "select tail(True) from t1",
- "select tail(c1,1) , count(c1) from t1",
- "select tail(c1,1) , avg(c1) from t1",
- "select tail(c1,1) , min(c1) from t1",
- "select tail(c1,1) , spread(c1) from t1",
- "select tail(c1,1) , diff(c1) from t1",
- "select tail from stb1 partition by tbname",
- "select tail(123--123)==1 from stb1 partition by tbname",
- "select tail(123,123) from stb1 partition by tbname",
- "select tail(c1,ts) from stb1 partition by tbname",
- "select tail(c1,c1,ts) from stb1 partition by tbname",
- "select tail(c1) as 'd1' from stb1 partition by tbname",
- "select tail(c1 ,c2 ) from stb1 partition by tbname",
- "select tail(c1 ,NULL) from stb1 partition by tbname",
- "select tail(,) from stb1 partition by tbname;",
- "select tail(tail(c1) ab from stb1 partition by tbname)",
- "select tail(c1) as int from stb1 partition by tbname",
- "select tail('c1') from stb1 partition by tbname",
- "select tail(NULL) from stb1 partition by tbname",
- "select tail('') from stb1 partition by tbname",
- "select tail(c%) from stb1 partition by tbname",
- "select tail(t1) from stb1 partition by tbname",
- "select tail(True) from stb1 partition by tbname",
- "select tail(c1,1) , count(c1) from stb1 partition by tbname",
- "select tail(c1,1) , avg(c1) from stb1 partition by tbname",
- "select tail(c1,1) , min(c1) from stb1 partition by tbname",
- "select tail(c1,1) , spread(c1) from stb1 partition by tbname",
- "select tail(c1,1) , diff(c1) from stb1 partition by tbname",
+ f"select tail from {dbname}.t1",
+ f"select tail(123--123)==1 from {dbname}.t1",
+ f"select tail(123,123) from {dbname}.t1",
+ f"select tail(c1,ts) from {dbname}.t1",
+ f"select tail(c1,c1,ts) from {dbname}.t1",
+ f"select tail(c1) as 'd1' from {dbname}.t1",
+ f"select tail(c1 ,c2 ) from {dbname}.t1",
+ f"select tail(c1 ,NULL) from {dbname}.t1",
+ f"select tail(,) from {dbname}.t1;",
+ f"select tail(tail(c1) ab from {dbname}.t1)",
+ f"select tail(c1) as int from {dbname}.t1",
+ f"select tail('c1') from {dbname}.t1",
+ f"select tail(NULL) from {dbname}.t1",
+ f"select tail('') from {dbname}.t1",
+ f"select tail(c%) from {dbname}.t1",
+ f"select tail(t1) from {dbname}.t1",
+ f"select tail(True) from {dbname}.t1",
+ f"select tail(c1,1) , count(c1) from {dbname}.t1",
+ f"select tail(c1,1) , avg(c1) from {dbname}.t1",
+ f"select tail(c1,1) , min(c1) from {dbname}.t1",
+ f"select tail(c1,1) , spread(c1) from {dbname}.t1",
+ f"select tail(c1,1) , diff(c1) from {dbname}.t1",
+ f"select tail from {dbname}.stb1 partition by tbname",
+ f"select tail(123--123)==1 from {dbname}.stb1 partition by tbname",
+ f"select tail(123,123) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,ts) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,c1,ts) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1) as 'd1' from {dbname}.stb1 partition by tbname",
+ f"select tail(c1 ,c2 ) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1 ,NULL) from {dbname}.stb1 partition by tbname",
+ f"select tail(,) from {dbname}.stb1 partition by tbname;",
+ f"select tail(tail(c1) ab from {dbname}.stb1 partition by tbname)",
+ f"select tail(c1) as int from {dbname}.stb1 partition by tbname",
+ f"select tail('c1') from {dbname}.stb1 partition by tbname",
+ f"select tail(NULL) from {dbname}.stb1 partition by tbname",
+ f"select tail('') from {dbname}.stb1 partition by tbname",
+ f"select tail(c%) from {dbname}.stb1 partition by tbname",
+ f"select tail(t1) from {dbname}.stb1 partition by tbname",
+ f"select tail(True) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , count(c1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , avg(c1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , min(c1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , spread(c1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , diff(c1) from {dbname}.stb1 partition by tbname",
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
-
- def support_types(self):
+
+ def support_types(self, dbname="db"):
other_no_value_types = [
- "select tail(ts,1) from t1" ,
- "select tail(c7,1) from t1",
- "select tail(c8,1) from t1",
- "select tail(c9,1) from t1",
- "select tail(ts,1) from ct1" ,
- "select tail(c7,1) from ct1",
- "select tail(c8,1) from ct1",
- "select tail(c9,1) from ct1",
- "select tail(ts,1) from ct3" ,
- "select tail(c7,1) from ct3",
- "select tail(c8,1) from ct3",
- "select tail(c9,1) from ct3",
- "select tail(ts,1) from ct4" ,
- "select tail(c7,1) from ct4",
- "select tail(c8,1) from ct4",
- "select tail(c9,1) from ct4",
- "select tail(ts,1) from stb1 partition by tbname" ,
- "select tail(c7,1) from stb1 partition by tbname",
- "select tail(c8,1) from stb1 partition by tbname",
- "select tail(c9,1) from stb1 partition by tbname"
+ f"select tail(ts,1) from {dbname}.t1" ,
+ f"select tail(c7,1) from {dbname}.t1",
+ f"select tail(c8,1) from {dbname}.t1",
+ f"select tail(c9,1) from {dbname}.t1",
+ f"select tail(ts,1) from {dbname}.ct1" ,
+ f"select tail(c7,1) from {dbname}.ct1",
+ f"select tail(c8,1) from {dbname}.ct1",
+ f"select tail(c9,1) from {dbname}.ct1",
+ f"select tail(ts,1) from {dbname}.ct3" ,
+ f"select tail(c7,1) from {dbname}.ct3",
+ f"select tail(c8,1) from {dbname}.ct3",
+ f"select tail(c9,1) from {dbname}.ct3",
+ f"select tail(ts,1) from {dbname}.ct4" ,
+ f"select tail(c7,1) from {dbname}.ct4",
+ f"select tail(c8,1) from {dbname}.ct4",
+ f"select tail(c9,1) from {dbname}.ct4",
+ f"select tail(ts,1) from {dbname}.stb1 partition by tbname" ,
+ f"select tail(c7,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c8,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c9,1) from {dbname}.stb1 partition by tbname"
]
-
+
for type_sql in other_no_value_types:
tdSql.query(type_sql)
-
+
type_sql_lists = [
- "select tail(c1,1) from t1",
- "select tail(c2,1) from t1",
- "select tail(c3,1) from t1",
- "select tail(c4,1) from t1",
- "select tail(c5,1) from t1",
- "select tail(c6,1) from t1",
+ f"select tail(c1,1) from {dbname}.t1",
+ f"select tail(c2,1) from {dbname}.t1",
+ f"select tail(c3,1) from {dbname}.t1",
+ f"select tail(c4,1) from {dbname}.t1",
+ f"select tail(c5,1) from {dbname}.t1",
+ f"select tail(c6,1) from {dbname}.t1",
- "select tail(c1,1) from ct1",
- "select tail(c2,1) from ct1",
- "select tail(c3,1) from ct1",
- "select tail(c4,1) from ct1",
- "select tail(c5,1) from ct1",
- "select tail(c6,1) from ct1",
+ f"select tail(c1,1) from {dbname}.ct1",
+ f"select tail(c2,1) from {dbname}.ct1",
+ f"select tail(c3,1) from {dbname}.ct1",
+ f"select tail(c4,1) from {dbname}.ct1",
+ f"select tail(c5,1) from {dbname}.ct1",
+ f"select tail(c6,1) from {dbname}.ct1",
- "select tail(c1,1) from ct3",
- "select tail(c2,1) from ct3",
- "select tail(c3,1) from ct3",
- "select tail(c4,1) from ct3",
- "select tail(c5,1) from ct3",
- "select tail(c6,1) from ct3",
+ f"select tail(c1,1) from {dbname}.ct3",
+ f"select tail(c2,1) from {dbname}.ct3",
+ f"select tail(c3,1) from {dbname}.ct3",
+ f"select tail(c4,1) from {dbname}.ct3",
+ f"select tail(c5,1) from {dbname}.ct3",
+ f"select tail(c6,1) from {dbname}.ct3",
- "select tail(c1,1) from stb1 partition by tbname",
- "select tail(c2,1) from stb1 partition by tbname",
- "select tail(c3,1) from stb1 partition by tbname",
- "select tail(c4,1) from stb1 partition by tbname",
- "select tail(c5,1) from stb1 partition by tbname",
- "select tail(c6,1) from stb1 partition by tbname",
+ f"select tail(c1,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c2,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c3,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c4,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c5,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c6,1) from {dbname}.stb1 partition by tbname",
- "select tail(c6,1) as alisb from stb1 partition by tbname",
- "select tail(c6,1) alisb from stb1 partition by tbname",
+ f"select tail(c6,1) as alisb from {dbname}.stb1 partition by tbname",
+ f"select tail(c6,1) alisb from {dbname}.stb1 partition by tbname",
]
for type_sql in type_sql_lists:
@@ -189,7 +186,6 @@ class TDTestCase:
tail_result = tdSql.queryResult
tdSql.query(equal_sql)
- print(equal_sql)
equal_result = tdSql.queryResult
@@ -198,257 +194,255 @@ class TDTestCase:
else:
tdLog.exit(" tail query check fail , tail sql is: %s " %tail_sql)
- def basic_tail_function(self):
+ def basic_tail_function(self, dbname="db"):
- # basic query
- tdSql.query("select c1 from ct3")
+ # basic query
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select tail(c1,1) from ct3")
+ tdSql.query(f"select tail(c1,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c2,1) from ct3")
+ tdSql.query(f"select tail(c2,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c3,1) from ct3")
+ tdSql.query(f"select tail(c3,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c4,1) from ct3")
+ tdSql.query(f"select tail(c4,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c5,1) from ct3")
+ tdSql.query(f"select tail(c5,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c6,1) from ct3")
-
+ tdSql.query(f"select tail(c6,1) from {dbname}.ct3")
+
# auto check for t1 table
# used for regular table
- tdSql.query("select tail(c1,1) from t1")
-
- tdSql.query("desc t1")
+ tdSql.query(f"select tail(c1,1) from {dbname}.t1")
+
+ tdSql.query(f"desc {dbname}.t1")
col_lists_rows = tdSql.queryResult
col_lists = []
for col_name in col_lists_rows:
if col_name[0] =="ts":
continue
-
+
col_lists.append(col_name[0])
-
+
for col in col_lists:
- for loop in range(100):
+ for loop in range(100):
limit = randint(1,100)
offset = randint(0,100)
- self.check_tail_table("t1" , col , limit , offset)
+ self.check_tail_table(f"{dbname}.t1" , col , limit , offset)
# tail for invalid params
-
- tdSql.error("select tail(c1,-10,10) from ct1")
- tdSql.error("select tail(c1,10,10000) from ct1")
- tdSql.error("select tail(c1,10,-100) from ct1")
- tdSql.error("select tail(c1,100/2,10) from ct1")
- tdSql.error("select tail(c1,5,10*2) from ct1")
- tdSql.query("select tail(c1,100,100) from ct1")
+
+ tdSql.error(f"select tail(c1,-10,10) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10000) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,-100) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,100/2,10) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,5,10*2) from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,100,100) from {dbname}.ct1")
tdSql.checkRows(0)
- tdSql.query("select tail(c1,10,100) from ct1")
+ tdSql.query(f"select tail(c1,10,100) from {dbname}.ct1")
tdSql.checkRows(0)
- tdSql.error("select tail(c1,10,101) from ct1")
- tdSql.query("select tail(c1,10,0) from ct1")
- tdSql.query("select tail(c1,100,10) from ct1")
+ tdSql.error(f"select tail(c1,10,101) from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,10,0) from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,100,10) from {dbname}.ct1")
tdSql.checkRows(3)
-
+
# tail with super tags
- tdSql.query("select tail(c1,10,10) from ct1")
+ tdSql.query(f"select tail(c1,10,10) from {dbname}.ct1")
tdSql.checkRows(3)
- tdSql.query("select tail(c1,10,10),tbname from ct1")
- tdSql.query("select tail(c1,10,10),t1 from ct1")
+ tdSql.query(f"select tail(c1,10,10),tbname from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,10,10),t1 from {dbname}.ct1")
- # tail with common col
- tdSql.query("select tail(c1,10,10) ,ts from ct1")
- tdSql.query("select tail(c1,10,10) ,c1 from ct1")
+ # tail with common col
+ tdSql.query(f"select tail(c1,10,10) ,ts from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,10,10) ,c1 from {dbname}.ct1")
+
+ # tail with scalar function
+ tdSql.query(f"select tail(c1,10,10) ,abs(c1) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10) , tail(c2,10,10) from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,10,10) , abs(c2)+2 from {dbname}.ct1")
- # tail with scalar function
- tdSql.query("select tail(c1,10,10) ,abs(c1) from ct1")
- tdSql.error("select tail(c1,10,10) , tail(c2,10,10) from ct1")
- tdSql.query("select tail(c1,10,10) , abs(c2)+2 from ct1")
-
# bug need fix for scalar value or compute again
- # tdSql.error(" select tail(c1,10,10) , 123 from ct1")
- # tdSql.error(" select abs(tail(c1,10,10)) from ct1")
- # tdSql.error(" select abs(tail(c1,10,10)) + 2 from ct1")
+ # tdSql.error(f"select tail(c1,10,10) , 123 from {dbname}.ct1")
+ # tdSql.error(f"select abs(tail(c1,10,10)) from {dbname}.ct1")
+ # tdSql.error(f"select abs(tail(c1,10,10)) + 2 from {dbname}.ct1")
- # tail with aggregate function
- tdSql.error("select tail(c1,10,10) ,sum(c1) from ct1")
- tdSql.error("select tail(c1,10,10) ,max(c1) from ct1")
- tdSql.error("select tail(c1,10,10) ,csum(c1) from ct1")
- tdSql.error("select tail(c1,10,10) ,count(c1) from ct1")
+ # tail with aggregate function
+ tdSql.error(f"select tail(c1,10,10) ,sum(c1) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10) ,max(c1) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10) ,csum(c1) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10) ,count(c1) from {dbname}.ct1")
# tail with filter where
- tdSql.query("select tail(c1,3,1) from ct4 where c1 is null")
+ tdSql.query(f"select tail(c1,3,1) from {dbname}.ct4 where c1 is null")
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
- tdSql.query("select tail(c1,3,2) from ct4 where c1 >2 order by 1")
+ tdSql.query(f"select tail(c1,3,2) from {dbname}.ct4 where c1 >2 order by 1")
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 6)
tdSql.checkData(2, 0, 7)
- tdSql.query("select tail(c1,2,1) from ct4 where c2 between 0 and 99999 order by 1")
+ tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 where c2 between 0 and 99999 order by 1")
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 2)
- # tail with union all
- tdSql.query("select tail(c1,2,1) from ct4 union all select c1 from ct1")
+ # tail with union all
+ tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 union all select c1 from {dbname}.ct1")
tdSql.checkRows(15)
- tdSql.query("select tail(c1,2,1) from ct4 union all select c1 from ct2 order by 1")
+ tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 union all select c1 from {dbname}.ct2 order by 1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 0)
tdSql.checkData(1, 0, 1)
- tdSql.query("select tail(c2,2,1) from ct4 union all select abs(c2)/2 from ct4")
+ tdSql.query(f"select tail(c2,2,1) from {dbname}.ct4 union all select abs(c2)/2 from {dbname}.ct4")
tdSql.checkRows(14)
- # tail with join
- # prepare join datas with same ts
+ # tail with join
+        # prepare join data with the same ts

- tdSql.execute(" use db ")
- tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table tb1 using st1 tags(1)")
- tdSql.execute(" create table tb2 using st1 tags(2)")
+ tdSql.execute(f" create stable {dbname}.st1 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(f" create table {dbname}.tb1 using {dbname}.st1 tags(1)")
+ tdSql.execute(f" create table {dbname}.tb2 using {dbname}.st1 tags(2)")
- tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table ttb1 using st2 tags(1)")
- tdSql.execute(" create table ttb2 using st2 tags(2)")
+ tdSql.execute(f" create stable {dbname}.st2 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(f" create table {dbname}.ttb1 using {dbname}.st2 tags(1)")
+ tdSql.execute(f" create table {dbname}.ttb2 using {dbname}.st2 tags(2)")
start_ts = 1622369635000 # 2021-05-30 18:13:55
for i in range(10):
ts_value = start_ts+i*1000
- tdSql.execute(f" insert into tb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into tb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , {i})")
- tdSql.query("select tail(tb2.num,3,2) from tb1, tb2 where tb1.ts=tb2.ts order by 1 desc")
+ tdSql.query(f"select tail(tb2.num,3,2) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts order by 1 desc")
tdSql.checkRows(3)
tdSql.checkData(0,0,7)
tdSql.checkData(1,0,6)
tdSql.checkData(2,0,5)
# nest query
- # tdSql.query("select tail(c1,2) from (select _rowts , c1 from ct1)")
- tdSql.query("select c1 from (select tail(c1,2) c1 from ct4) order by 1 nulls first")
+ # tdSql.query(f"select tail(c1,2) from (select _rowts , c1 from {dbname}.ct1)")
+ tdSql.query(f"select c1 from (select tail(c1,2) c1 from {dbname}.ct4) order by 1 nulls first")
tdSql.checkRows(2)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 0)
- tdSql.query("select sum(c1) from (select tail(c1,2) c1 from ct1)")
+ tdSql.query(f"select sum(c1) from (select tail(c1,2) c1 from {dbname}.ct1)")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 18)
- tdSql.query("select abs(c1) from (select tail(c1,2) c1 from ct1)")
+ tdSql.query(f"select abs(c1) from (select tail(c1,2) c1 from {dbname}.ct1)")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 9)
-
+
#partition by tbname
- tdSql.query(" select tail(c1,5) from stb1 partition by tbname ")
+ tdSql.query(f"select tail(c1,5) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(10)
- tdSql.query(" select tail(c1,3) from stb1 partition by tbname ")
+ tdSql.query(f"select tail(c1,3) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(6)
-
- # group by
- tdSql.error("select tail(c1,2) from ct1 group by c1")
- tdSql.error("select tail(c1,2) from ct1 group by tbname")
+
+ # group by
+ tdSql.error(f"select tail(c1,2) from {dbname}.ct1 group by c1")
+ tdSql.error(f"select tail(c1,2) from {dbname}.ct1 group by tbname")
# super table
- tdSql.error("select tbname , tail(c1,2) from stb1 group by tbname")
- tdSql.query("select tail(c1,2) from stb1 partition by tbname")
+ tdSql.error(f"select tbname , tail(c1,2) from {dbname}.stb1 group by tbname")
+ tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
- # bug need fix
- # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname")
+ # bug need fix
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname")
# tdSql.checkRows(4)
- # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname order by tbname")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname order by tbname")
# tdSql.checkRows(4)
- # tdSql.query(" select tbname , count(c1) from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , count(c1) from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , max(c1) ,c1 from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname ,first(c1) from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname ,first(c1) from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- tdSql.query("select tail(c1,2) from stb1 partition by tbname")
+ tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
- # # bug need fix
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where t1 = 0 partition by tbname ")
+ # # bug need fix
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where t1 = 0 partition by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where t1 = 0 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where t1 = 0 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname order by tbname ")
# tdSql.checkRows(3)
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname ")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname ")
# tdSql.checkRows(3)
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname ")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname ")
# tdSql.checkRows(3)
- tdSql.query(" select tail(t1,2) from stb1 ")
+ tdSql.query(f"select tail(t1,2) from {dbname}.stb1 ")
tdSql.checkRows(2)
- tdSql.query(" select tail(t1+c1,2) from stb1 ")
+ tdSql.query(f"select tail(t1+c1,2) from {dbname}.stb1 ")
tdSql.checkRows(2)
- tdSql.query(" select tail(t1+c1,2) from stb1 partition by tbname ")
+ tdSql.query(f"select tail(t1+c1,2) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(4)
- tdSql.query(" select tail(t1,2) from stb1 partition by tbname ")
+ tdSql.query(f"select tail(t1,2) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(4)
- # nest query
- tdSql.query(" select tail(c1,2) from (select _rowts , t1 ,c1 , tbname from stb1 ) ")
+ # nest query
+ tdSql.query(f"select tail(c1,2) from (select _rowts , t1 ,c1 , tbname from {dbname}.stb1 ) ")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
tdSql.checkData(1,0,9)
- tdSql.query("select tail(t1,2) from (select _rowts , t1 , tbname from stb1 )")
+ tdSql.query(f"select tail(t1,2) from (select _rowts , t1 , tbname from {dbname}.stb1 )")
tdSql.checkRows(2)
tdSql.checkData(0,0,4)
tdSql.checkData(1,0,1)
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
-
+
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
-
- tdSql.query("select tail(c2,2) from sub1_bound order by 1 desc")
+
+ tdSql.query(f"select tail(c2,2) from {dbname}.sub1_bound order by 1 desc")
tdSql.checkRows(2)
tdSql.checkData(0,0,9223372036854775803)
@@ -456,22 +450,22 @@ class TDTestCase:
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
-
+
self.prepare_datas()
- tdLog.printNoPrefix("==========step2:test errors ==============")
+ tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
-
- tdLog.printNoPrefix("==========step3:support types ============")
+
+ tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
- tdLog.printNoPrefix("==========step4: tail basic query ============")
+ tdLog.printNoPrefix("==========step4: tail basic query ============")
self.basic_tail_function()
- tdLog.printNoPrefix("==========step5: tail boundary query ============")
+ tdLog.printNoPrefix("==========step5: tail boundary query ============")
self.check_boundary_values()
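check_boundary_values now takes the database name as a parameter as well (defaulting to bound_test) and drops and recreates that database itself instead of issuing "use bound_test", keeping the boundary cases isolated from the main db. A short sketch of the isolation pattern, assuming the same tdSql fixture:

    # per-test database isolation as used by check_boundary_values
    dbname = "bound_test"
    tdSql.execute(f"drop database if exists {dbname}")
    tdSql.execute(f"create database if not exists {dbname}")
    # later statements qualify tables with the database name instead of "use"
    tdSql.execute(f"create table {dbname}.stb_bound (ts timestamp, c1 int) tags (t1 int)")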
diff --git a/tests/system-test/2-query/tan.py b/tests/system-test/2-query/tan.py
index da47c1c2b2..683cee37ff 100644
--- a/tests/system-test/2-query/tan.py
+++ b/tests/system-test/2-query/tan.py
@@ -9,48 +9,46 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
-
- def prepare_datas(self):
+
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
-
+
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -61,18 +59,18 @@ class TDTestCase:
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
- ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
+ ( '2022-12-31 01:01:36.000', 9, -99999, -999, -99, -9.99, -99999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
-
+
def check_result_auto_tan(self ,origin_query , pow_query):
pow_result = tdSql.getResult(pow_query)
origin_result = tdSql.getResult(origin_query)
auto_result =[]
-
+
for row in origin_result:
row_check = []
for elem in row:
@@ -82,190 +80,178 @@ class TDTestCase:
elem = math.tan(elem)
row_check.append(elem)
auto_result.append(row_check)
-
- check_status = True
-
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("tan function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("tan value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def test_errors(self):
+ tdSql.checkData(row_index , col_index ,auto_result[row_index][col_index] )
+
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select tan from t1",
- # "select tan(-+--+c1 ) from t1",
- # "select +-tan(c1) from t1",
- # "select ++-tan(c1) from t1",
- # "select ++--tan(c1) from t1",
- # "select - -tan(c1)*0 from t1",
- # "select tan(tbname+1) from t1 ",
- "select tan(123--123)==1 from t1",
- "select tan(c1) as 'd1' from t1",
- "select tan(c1 ,c2) from t1",
- "select tan(c1 ,NULL ) from t1",
- "select tan(,) from t1;",
- "select tan(tan(c1) ab from t1)",
- "select tan(c1 ) as int from t1",
- "select tan from stb1",
- # "select tan(-+--+c1) from stb1",
- # "select +-tan(c1) from stb1",
- # "select ++-tan(c1) from stb1",
- # "select ++--tan(c1) from stb1",
- # "select - -tan(c1)*0 from stb1",
- # "select tan(tbname+1) from stb1 ",
- "select tan(123--123)==1 from stb1",
- "select tan(c1) as 'd1' from stb1",
- "select tan(c1 ,c2 ) from stb1",
- "select tan(c1 ,NULL) from stb1",
- "select tan(,) from stb1;",
- "select tan(tan(c1) ab from stb1)",
- "select tan(c1) as int from stb1"
+ f"select tan from {dbname}.t1",
+ # f"select tan(-+--+c1 ) from {dbname}.t1",
+ # f"select +-tan(c1) from {dbname}.t1",
+ # f"select ++-tan(c1) from {dbname}.t1",
+ # f"select ++--tan(c1) from {dbname}.t1",
+ # f"select - -tan(c1)*0 from {dbname}.t1",
+ # f"select tan(tbname+1) from {dbname}.t1 ",
+ f"select tan(123--123)==1 from {dbname}.t1",
+ f"select tan(c1) as 'd1' from {dbname}.t1",
+ f"select tan(c1 ,c2) from {dbname}.t1",
+ f"select tan(c1 ,NULL ) from {dbname}.t1",
+ f"select tan(,) from {dbname}.t1;",
+ f"select tan(tan(c1) ab from {dbname}.t1)",
+ f"select tan(c1 ) as int from {dbname}.t1",
+ f"select tan from {dbname}.stb1",
+ # f"select tan(-+--+c1) from {dbname}.stb1",
+ # f"select +-tan(c1) from {dbname}.stb1",
+ # f"select ++-tan(c1) from {dbname}.stb1",
+ # f"select ++--tan(c1) from {dbname}.stb1",
+ # f"select - -tan(c1)*0 from {dbname}.stb1",
+ # f"select tan(tbname+1) from {dbname}.stb1 ",
+ f"select tan(123--123)==1 from {dbname}.stb1",
+ f"select tan(c1) as 'd1' from {dbname}.stb1",
+ f"select tan(c1 ,c2 ) from {dbname}.stb1",
+ f"select tan(c1 ,NULL) from {dbname}.stb1",
+ f"select tan(,) from {dbname}.stb1;",
+ f"select tan(tan(c1) ab from {dbname}.stb1)",
+ f"select tan(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
-
- def support_types(self):
+
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select tan(ts) from t1" ,
- "select tan(c7) from t1",
- "select tan(c8) from t1",
- "select tan(c9) from t1",
- "select tan(ts) from ct1" ,
- "select tan(c7) from ct1",
- "select tan(c8) from ct1",
- "select tan(c9) from ct1",
- "select tan(ts) from ct3" ,
- "select tan(c7) from ct3",
- "select tan(c8) from ct3",
- "select tan(c9) from ct3",
- "select tan(ts) from ct4" ,
- "select tan(c7) from ct4",
- "select tan(c8) from ct4",
- "select tan(c9) from ct4",
- "select tan(ts) from stb1" ,
- "select tan(c7) from stb1",
- "select tan(c8) from stb1",
- "select tan(c9) from stb1" ,
+ f"select tan(ts) from {dbname}.t1" ,
+ f"select tan(c7) from {dbname}.t1",
+ f"select tan(c8) from {dbname}.t1",
+ f"select tan(c9) from {dbname}.t1",
+ f"select tan(ts) from {dbname}.ct1" ,
+ f"select tan(c7) from {dbname}.ct1",
+ f"select tan(c8) from {dbname}.ct1",
+ f"select tan(c9) from {dbname}.ct1",
+ f"select tan(ts) from {dbname}.ct3" ,
+ f"select tan(c7) from {dbname}.ct3",
+ f"select tan(c8) from {dbname}.ct3",
+ f"select tan(c9) from {dbname}.ct3",
+ f"select tan(ts) from {dbname}.ct4" ,
+ f"select tan(c7) from {dbname}.ct4",
+ f"select tan(c8) from {dbname}.ct4",
+ f"select tan(c9) from {dbname}.ct4",
+ f"select tan(ts) from {dbname}.stb1" ,
+ f"select tan(c7) from {dbname}.stb1",
+ f"select tan(c8) from {dbname}.stb1",
+ f"select tan(c9) from {dbname}.stb1" ,
- "select tan(ts) from stbbb1" ,
- "select tan(c7) from stbbb1",
+ f"select tan(ts) from {dbname}.stbbb1" ,
+ f"select tan(c7) from {dbname}.stbbb1",
- "select tan(ts) from tbname",
- "select tan(c9) from tbname"
+ f"select tan(ts) from {dbname}.tbname",
+ f"select tan(c9) from {dbname}.tbname"
]
-
+
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
-
-
+
+
type_sql_lists = [
- "select tan(c1) from t1",
- "select tan(c2) from t1",
- "select tan(c3) from t1",
- "select tan(c4) from t1",
- "select tan(c5) from t1",
- "select tan(c6) from t1",
+ f"select tan(c1) from {dbname}.t1",
+ f"select tan(c2) from {dbname}.t1",
+ f"select tan(c3) from {dbname}.t1",
+ f"select tan(c4) from {dbname}.t1",
+ f"select tan(c5) from {dbname}.t1",
+ f"select tan(c6) from {dbname}.t1",
- "select tan(c1) from ct1",
- "select tan(c2) from ct1",
- "select tan(c3) from ct1",
- "select tan(c4) from ct1",
- "select tan(c5) from ct1",
- "select tan(c6) from ct1",
+ f"select tan(c1) from {dbname}.ct1",
+ f"select tan(c2) from {dbname}.ct1",
+ f"select tan(c3) from {dbname}.ct1",
+ f"select tan(c4) from {dbname}.ct1",
+ f"select tan(c5) from {dbname}.ct1",
+ f"select tan(c6) from {dbname}.ct1",
- "select tan(c1) from ct3",
- "select tan(c2) from ct3",
- "select tan(c3) from ct3",
- "select tan(c4) from ct3",
- "select tan(c5) from ct3",
- "select tan(c6) from ct3",
+ f"select tan(c1) from {dbname}.ct3",
+ f"select tan(c2) from {dbname}.ct3",
+ f"select tan(c3) from {dbname}.ct3",
+ f"select tan(c4) from {dbname}.ct3",
+ f"select tan(c5) from {dbname}.ct3",
+ f"select tan(c6) from {dbname}.ct3",
- "select tan(c1) from stb1",
- "select tan(c2) from stb1",
- "select tan(c3) from stb1",
- "select tan(c4) from stb1",
- "select tan(c5) from stb1",
- "select tan(c6) from stb1",
+ f"select tan(c1) from {dbname}.stb1",
+ f"select tan(c2) from {dbname}.stb1",
+ f"select tan(c3) from {dbname}.stb1",
+ f"select tan(c4) from {dbname}.stb1",
+ f"select tan(c5) from {dbname}.stb1",
+ f"select tan(c6) from {dbname}.stb1",
- "select tan(c6) as alisb from stb1",
- "select tan(c6) alisb from stb1",
+ f"select tan(c6) as alisb from {dbname}.stb1",
+ f"select tan(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
-
- def basic_tan_function(self):
- # basic query
- tdSql.query("select c1 from ct3")
+ def basic_tan_function(self, dbname="db"):
+
+ # basic query
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select tan(c1) from ct3")
+ tdSql.query(f"select tan(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c2) from ct3")
+ tdSql.query(f"select tan(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c3) from ct3")
+ tdSql.query(f"select tan(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c4) from ct3")
+ tdSql.query(f"select tan(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c5) from ct3")
+ tdSql.query(f"select tan(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c6) from ct3")
+ tdSql.query(f"select tan(c6) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select tan(c1) from t1")
+ tdSql.query(f"select tan(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1.557407725)
tdSql.checkData(3 , 0, -0.142546543)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_tan( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)), tan(abs(c5)) from t1")
-
+ self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.t1", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)) from {dbname}.t1")
+
# used for sub table
- tdSql.query("select c2 ,tan(c2) from ct1")
+ tdSql.query(f"select c2 ,tan(c2) from {dbname}.ct1")
tdSql.checkData(0, 1, -0.226288661)
tdSql.checkData(1 , 1, 0.670533806)
tdSql.checkData(3 , 1, -1.325559275)
tdSql.checkData(4 , 1, 0.000000000)
- tdSql.query("select c1, c5 ,tan(c5) from ct4")
+ tdSql.query(f"select c1, c5 ,tan(c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, -0.605942929)
tdSql.checkData(2 , 2, 11.879355609)
tdSql.checkData(3 , 2, 0.395723765)
tdSql.checkData(5 , 2, None)
- self.check_result_auto_tan( "select c1, c2, c3 , c4, c5 from ct1", "select tan(c1), tan(c2) ,tan(c3), tan(c4), tan(c5) from ct1")
-
+ self.check_result_auto_tan( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select tan(c1), tan(c2) ,tan(c3), tan(c4), tan(c5) from {dbname}.ct1")
+
# nest query for tan functions
- tdSql.query("select c4 , tan(c4) ,tan(tan(c4)) , tan(tan(tan(c4))) from ct1;")
+ tdSql.query(f"select c4 , tan(c4) ,tan(tan(c4)) , tan(tan(tan(c4))) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 88)
tdSql.checkData(0 , 1 , 0.035420501)
tdSql.checkData(0 , 2 , 0.035435322)
@@ -281,52 +267,52 @@ class TDTestCase:
tdSql.checkData(11 , 2 , -0.040227928)
tdSql.checkData(11 , 3 , -0.040249642)
- # used for stable table
-
- tdSql.query("select tan(c1) from stb1")
+ # used for stable table
+
+ tdSql.query(f"select tan(c1) from {dbname}.stb1")
tdSql.checkRows(25)
-
+
# used for not exists table
- tdSql.error("select tan(c1) from stbbb1")
- tdSql.error("select tan(c1) from tbname")
- tdSql.error("select tan(c1) from ct5")
+ tdSql.error(f"select tan(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select tan(c1) from {dbname}.tbname")
+ tdSql.error(f"select tan(c1) from {dbname}.ct5")
+
+ # mix with common col
+ tdSql.query(f"select c1, tan(c1) from {dbname}.ct1")
+ tdSql.query(f"select c2, tan(c2) from {dbname}.ct4")
- # mix with common col
- tdSql.query("select c1, tan(c1) from ct1")
- tdSql.query("select c2, tan(c2) from ct4")
-
# mix with common functions
- tdSql.query("select c1, tan(c1),tan(c1), tan(tan(c1)) from ct4 ")
+ tdSql.query(f"select c1, tan(c1),tan(c1), tan(tan(c1)) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
-
+
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 ,-0.291006191)
tdSql.checkData(3 , 2 ,-0.291006191)
tdSql.checkData(3 , 3 ,-0.299508909)
- tdSql.query("select c1, tan(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, tan(c1),c5, floor(c5) from {dbname}.stb1 ")
    # mix with agg functions, not supported
- tdSql.error("select c1, tan(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, tan(c1),c5, count(c5) from ct1 ")
- tdSql.error("select tan(c1), count(c5) from stb1 ")
- tdSql.error("select tan(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, tan(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, tan(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select tan(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select tan(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
    # agg functions mixed with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
-
- # # bug fix for compute
- tdSql.query("select c1, tan(c1) -0 ,tan(c1-4)-0 from ct4 ")
+
+    # bug fix for compute
+ tdSql.query(f"select c1, tan(c1) -0 ,tan(c1-4)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -334,7 +320,7 @@ class TDTestCase:
tdSql.checkData(1, 1, -6.799711455)
tdSql.checkData(1, 2, 1.157821282)
- tdSql.query(" select c1, tan(c1) -0 ,tan(c1-0.1)-0.1 from ct4")
+ tdSql.query(f"select c1, tan(c1) -0 ,tan(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -342,35 +328,33 @@ class TDTestCase:
tdSql.checkData(1, 1, -6.799711455)
tdSql.checkData(1, 2, -21.815112681)
- tdSql.query("select c1, tan(c1), c2, tan(c2), c3, tan(c3) from ct1")
+ tdSql.query(f"select c1, tan(c1), c2, tan(c2), c3, tan(c3) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, tan(100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.tan(100000000))
-
- tdSql.query("select c1, tan(10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.tan(10000000000000))
- tdSql.query("select c1, tan(10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, tan(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, tan(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(1, 1, math.tan(10000000000000000000000000.0))
- tdSql.query("select c1, tan(10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, tan(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, tan(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+    tdSql.query(f"select c1, tan(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.tan(10000000000000000000000000000000000.0))
- tdSql.query("select c1, tan(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, tan(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+    tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.tan(10000000000000000000000000000000000000000.0))
- tdSql.query("select c1, tan(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -378,7 +362,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,-7.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -386,7 +370,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,-3.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1>tan(c1) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from {dbname}.ct4 where c1>tan(c1) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,88888)
@@ -394,45 +378,40 @@ class TDTestCase:
tdSql.checkData(0,3,8.000000000)
tdSql.checkData(0,4,7.900000000)
tdSql.checkData(0,5,-7.000000000)
-
- def pow_Arithmetic(self):
- pass
-
- def check_boundary_values(self):
+
+ def check_boundary_values(self, dbname="bound_test"):
PI=3.1415926
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_tan( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)), tan(abs(c5)) from sub1_bound")
-
- self.check_result_auto_tan( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select tan(c1), tan(c2) ,tan(c3), tan(c3), tan(c2) ,tan(c1) from sub1_bound")
+ self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)) from {dbname}.sub1_bound")
+
+ self.check_result_auto_tan( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select tan(c1), tan(c2) ,tan(c3), tan(c3), tan(c2) ,tan(c1) from {dbname}.sub1_bound")
+
+ self.check_result_auto_tan(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select tan(abs(c1)) from {dbname}.sub1_bound" )
- self.check_result_auto_tan("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select tan(abs(c1)) from sub1_bound" )
-
    # check basic elements per row for the table
- tdSql.query("select tan(abs(c1)) ,tan(abs(c2)) , tan(abs(c3)) , tan(abs(c4)), tan(abs(c5)), tan(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select tan(abs(c1)) ,tan(abs(c2)) , tan(abs(c3)) , tan(abs(c4)), tan(abs(c5)), tan(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.tan(2147483647))
tdSql.checkData(0,1,math.tan(9223372036854775807))
tdSql.checkData(0,2,math.tan(32767))
@@ -450,76 +429,71 @@ class TDTestCase:
tdSql.checkData(3,4,math.tan(339999995214436424907732413799364296704.00000))
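The long constant in the check above is not arbitrary: it is 3.40E+38 after a round trip through the 32-bit float column c5, i.e. rounded to single precision. The value can be reproduced without a server:

```python
import math
import struct

# Round-trip 3.40E+38 through IEEE-754 single precision,
# as storing it in the float column c5 does.
f32 = struct.unpack('!f', struct.pack('!f', 3.40e38))[0]

print(f"{f32:.5f}")   # 339999995214436424907732413799364296704.00000
print(math.tan(f32))  # the expected value in the checkData call above
```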
# check + - * / in functions
- tdSql.query("select tan(abs(c1+1)) ,tan(abs(c2)) , tan(abs(c3*1)) , tan(abs(c4/2)), tan(abs(c5))/2, tan(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select tan(abs(c1+1)) ,tan(abs(c2)) , tan(abs(c3*1)) , tan(abs(c4/2)), tan(abs(c5))/2, tan(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.tan(2147483648.000000000))
tdSql.checkData(0,1,math.tan(9223372036854775807))
tdSql.checkData(0,2,math.tan(32767.000000000))
tdSql.checkData(0,3,math.tan(63.500000000))
- tdSql.execute("create stable st (ts timestamp, num1 float, num2 double) tags (t1 int);")
- tdSql.execute(f'create table tb1 using st tags (1)')
- tdSql.execute(f'create table tb2 using st tags (2)')
- tdSql.execute(f'create table tb3 using st tags (3)')
- tdSql.execute('insert into tb1 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb1 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb1 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb1 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb1 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
+ tdSql.execute(f"create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);")
+ tdSql.execute(f'create table {dbname}.tb1 using {dbname}.st tags (1)')
+ tdSql.execute(f'create table {dbname}.tb2 using {dbname}.st tags (2)')
+ tdSql.execute(f'create table {dbname}.tb3 using {dbname}.st tags (3)')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now(), {PI*2.5}, {PI*2.5})')
- tdSql.execute('insert into tb2 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb2 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb2 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb2 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb2 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now(), {PI*2.5}, {PI*2.5})')
- for i in range(100):
- tdSql.execute('insert into tb3 values (now()+{}s, {}, {})'.format(i,PI*(5+i)/2 ,PI*(5+i)/2))
+ self.check_result_auto_tan(f"select num1,num2 from {dbname}.tb3;" , f"select tan(num1),tan(num2) from {dbname}.tb3")
- self.check_result_auto_tan("select num1,num2 from tb3;" , "select tan(num1),tan(num2) from tb3")
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto_tan( f"select c5 from {dbname}.stb1 order by ts " , f"select tan(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_tan( f"select c5 from {dbname}.stb1 order by tbname " , f"select tan(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_tan( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_tan( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto_tan( " select c5 from stb1 order by ts " , "select tan(c5) from stb1 order by ts" )
- self.check_result_auto_tan( " select c5 from stb1 order by tbname " , "select tan(c5) from stb1 order by tbname" )
- self.check_result_auto_tan( " select c5 from stb1 where c1 > 0 order by tbname " , "select tan(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_tan( " select c5 from stb1 where c1 > 0 order by tbname " , "select tan(c5) from stb1 where c1 > 0 order by tbname" )
-
- self.check_result_auto_tan( " select t1,c5 from stb1 order by ts " , "select tan(t1), tan(c5) from stb1 order by ts" )
- self.check_result_auto_tan( " select t1,c5 from stb1 order by tbname " , "select tan(t1) ,tan(c5) from stb1 order by tbname" )
- self.check_result_auto_tan( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select tan(t1) ,tan(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_tan( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select tan(t1) , tan(c5) from stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select tan(t1), tan(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select tan(t1) ,tan(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(t1) ,tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(t1) , tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
-
-
+
+
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
-
+
self.prepare_datas()
- tdLog.printNoPrefix("==========step2:test errors ==============")
+ tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
-
- tdLog.printNoPrefix("==========step3:support types ============")
+
+ tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
- tdLog.printNoPrefix("==========step4: tan basic query ============")
+ tdLog.printNoPrefix("==========step4: tan basic query ============")
self.basic_tan_function()
- tdLog.printNoPrefix("==========step5: big number tan query ============")
+ tdLog.printNoPrefix("==========step5: big number tan query ============")
self.test_big_number()
-
- tdLog.printNoPrefix("==========step6: tan boundary query ============")
+ tdLog.printNoPrefix("==========step6: tan boundary query ============")
self.check_boundary_values()
- tdLog.printNoPrefix("==========step7: tan filter query ============")
+ tdLog.printNoPrefix("==========step7: tan filter query ============")
self.abs_func_filter()
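check_result_auto_tan, called throughout these hunks, is defined earlier in tan.py and falls outside the context shown. Conceptually it pairs the rows of the plain query with the rows of the tan() query and checks them element-wise; a standalone sketch of that idea (a hypothetical simplification, not the file's exact code):

```python
import math

def check_result_auto_tan(origin_rows, tan_rows, rel_tol=1e-7):
    # Hypothetical sketch: applying math.tan to every value of the first
    # query's rows must reproduce the second query's rows; NULLs stay NULL.
    for row_o, row_t in zip(origin_rows, tan_rows):
        for v_o, v_t in zip(row_o, row_t):
            if v_o is None:
                assert v_t is None
            else:
                assert math.isclose(math.tan(v_o), v_t, rel_tol=rel_tol)

check_result_auto_tan([(0.0, None)], [(0.0, None)])  # tan(0) == 0, NULL passthrough
```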
diff --git a/tests/system-test/2-query/timetruncate.py b/tests/system-test/2-query/timetruncate.py
index 3551d8ee2c..d773114c3c 100644
--- a/tests/system-test/2-query/timetruncate.py
+++ b/tests/system-test/2-query/timetruncate.py
@@ -25,6 +25,7 @@ class TDTestCase:
self.ntbname = f'{self.dbname}.ntb'
self.stbname = f'{self.dbname}.stb'
self.ctbname = f'{self.dbname}.ctb'
+
def check_ms_timestamp(self,unit,date_time):
if unit.lower() == '1a':
for i in range(len(self.ts_str)):
@@ -45,11 +46,12 @@ class TDTestCase:
elif unit.lower() == '1d':
for i in range(len(self.ts_str)):
ts_result = self.get_time.get_ms_timestamp(str(tdSql.queryResult[i][0]))
- tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24)*24*60*60*1000)
+ tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24)*24*60*60*1000)
elif unit.lower() == '1w':
for i in range(len(self.ts_str)):
ts_result = self.get_time.get_ms_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24/7)*7*24*60*60*1000)
+
def check_us_timestamp(self,unit,date_time):
if unit.lower() == '1u':
for i in range(len(self.ts_str)):
@@ -74,11 +76,12 @@ class TDTestCase:
elif unit.lower() == '1d':
for i in range(len(self.ts_str)):
ts_result = self.get_time.get_us_timestamp(str(tdSql.queryResult[i][0]))
- tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24)*24*60*60*1000*1000 )
+ tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24)*24*60*60*1000*1000 )
elif unit.lower() == '1w':
for i in range(len(self.ts_str)):
ts_result = self.get_time.get_us_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24/7)*7*24*60*60*1000*1000)
+
def check_ns_timestamp(self,unit,date_time):
if unit.lower() == '1b':
for i in range(len(self.ts_str)):
@@ -100,21 +103,23 @@ class TDTestCase:
tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60)*60*60*1000*1000*1000 )
elif unit.lower() == '1d':
for i in range(len(self.ts_str)):
- tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 )
+ tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 )
elif unit.lower() == '1w':
for i in range(len(self.ts_str)):
tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24/7)*7*24*60*60*1000*1000*1000)
+
def check_tb_type(self,unit,tb_type):
- if tb_type.lower() == 'ntb':
+ if tb_type.lower() == 'ntb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.ntbname}')
elif tb_type.lower() == 'ctb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.ctbname}')
elif tb_type.lower() == 'stb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.stbname}')
+
def data_check(self,date_time,precision,tb_type):
for unit in self.time_unit:
if (unit.lower() == '1u' and precision.lower() == 'ms') or (unit.lower() == '1b' and precision.lower() == 'us') or (unit.lower() == '1b' and precision.lower() == 'ms'):
- if tb_type.lower() == 'ntb':
+ if tb_type.lower() == 'ntb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.ntbname}')
elif tb_type.lower() == 'ctb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}')
@@ -139,16 +144,19 @@ class TDTestCase:
tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}')
elif tb_type.lower() == 'stb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.stbname}')
+
def function_check_ntb(self):
for precision in self.db_param_precision:
tdSql.execute(f'drop database if exists {self.dbname}')
tdSql.execute(f'create database {self.dbname} precision "{precision}"')
+            tdLog.info(f"===== now testing a {precision} precision database =====")
tdSql.execute(f'use {self.dbname}')
tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)')
for ts in self.ts_str:
tdSql.execute(f'insert into {self.ntbname} values("{ts}",1)')
date_time = self.get_time.time_transform(self.ts_str,precision)
self.data_check(date_time,precision,'ntb')
+
def function_check_stb(self):
for precision in self.db_param_precision:
tdSql.execute(f'drop database if exists {self.dbname}')
@@ -161,9 +169,11 @@ class TDTestCase:
date_time = self.get_time.time_transform(self.ts_str,precision)
self.data_check(date_time,precision,'ctb')
self.data_check(date_time,precision,'stb')
+
def run(self):
self.function_check_ntb()
self.function_check_stb()
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
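The expected values in check_ms_timestamp, check_us_timestamp, and check_ns_timestamp are all the same floor-to-unit arithmetic at different scales. For the millisecond day case, int(date_time[i]/1000/60/60/24)*24*60*60*1000 floors the timestamp to the start of its UTC day (int() on a float division matches floor division for the positive timestamps used here):

```python
# Millisecond timestamp truncated to the start of its UTC day, mirroring
# the expected-value arithmetic in check_ms_timestamp for the '1d' unit.
MS_PER_DAY = 24 * 60 * 60 * 1000

def truncate_ms_to_day(ts_ms: int) -> int:
    return ts_ms // MS_PER_DAY * MS_PER_DAY

assert truncate_ms_to_day(1651500000000) == 1651449600000  # 2022-05-02T00:00:00Z
```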
diff --git a/tests/system-test/2-query/tsbsQuery.py b/tests/system-test/2-query/tsbsQuery.py
index 617f7e7464..04a80a74ad 100644
--- a/tests/system-test/2-query/tsbsQuery.py
+++ b/tests/system-test/2-query/tsbsQuery.py
@@ -22,7 +22,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
def create_ctable(self,tsql=None, dbName='db',stbName='stb',ctbPrefix='ctb',ctbNum=1):
tsql.execute("use %s" %dbName)
@@ -32,16 +32,16 @@ class TDTestCase:
for i in range(ctbNum):
tagValue = 'beijing'
if (i % 10 == 0):
- sql += " %s%d using %s (name,fleet,driver,device_version,load_capacity,fuel_capacity,nominal_fuel_consumption) tags('truck_%d', 'South%d','Trish%d','v2.%d', 1500+%d*20, 150+%d*2, 5+%d)"%(ctbPrefix,i,stbName,i,i,i,i,(1500+i*20),(150+i*2),(5+i))
+ sql += f" {dbName}.%s%d using %s (name,fleet,driver,device_version,load_capacity,fuel_capacity,nominal_fuel_consumption) tags('truck_%d', 'South%d','Trish%d','v2.%d', 1500+%d*20, 150+%d*2, 5+%d)"%(ctbPrefix,i,stbName,i,i,i,i,(1500+i*20),(150+i*2),(5+i))
else:
model = 'H-%d'%i
- sql += " %s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d', %d, %d,%d)"%(ctbPrefix,i,stbName,i,i,i,model,i,(1500+i*20),(150+i*2),(5+i))
+ sql += f" {dbName}.%s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d', %d, %d,%d)"%(ctbPrefix,i,stbName,i,i,i,model,i,(1500+i*20),(150+i*2),(5+i))
if (i > 0) and (i%1000 == 0):
tsql.execute(sql)
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -54,32 +54,32 @@ class TDTestCase:
startTs = int(round(t * 1000))
for i in range(ctbNum):
- sql += " %s%d values "%(ctbPrefix,i)
+ sql += f" {dbName}.%s%d values "%(ctbPrefix,i)
for j in range(rowsPerTbl):
if(ctbPrefix=="rct"):
sql += f"({startTs+j*60000}, {80+j}, {90+j}, {85+j}, {30+j*10}, {1.2*j}, {221+j*2}, {20+j*0.2}) "
elif ( ctbPrefix=="dct"):
status= random.randint(0,1)
- sql += f"( {startTs+j*60000}, {1+j*0.1},{1400+j*15}, {status} ) "
+ sql += f"( {startTs+j*60000}, {1+j*0.1},{1400+j*15}, {status} ) "
# tdLog.debug("1insert sql:%s"%sql)
if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
# tdLog.debug("2insert sql:%s"%sql)
tsql.execute(sql)
if j < rowsPerTbl - 1:
- sql = "insert into %s%d values " %(ctbPrefix,i)
+ sql = f"insert into {dbName}.%s%d values " %(ctbPrefix,i)
else:
sql = "insert into "
if sql != pre_insert:
# tdLog.debug("3insert sql:%s"%sql)
- tsql.execute(sql)
+ tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
def prepareData(self):
dbname="db_tsbs"
- stabname1="readings"
- stabname2="diagnostics"
- ctbnamePre1="rct"
+ stabname1=f"{dbname}.readings"
+ stabname2=f"{dbname}.diagnostics"
+ ctbnamePre1="rct"
ctbnamePre2="dct"
ctbNums=50
self.ctbNums=ctbNums
@@ -107,7 +107,7 @@ class TDTestCase:
# tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}',NULL ,'v2.3')")
# else:
# tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')")
- # for j in range(ctbNums):
+ # for j in range(ctbNums):
# for i in range(rowNUms):
# tdSql.execute(
# f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
@@ -133,106 +133,106 @@ class TDTestCase:
# tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query )
- def tsbsIotQuery(self,insertinto=True):
-
+ def tsbsIotQuery(self,insertinto=True, dbname="db_tsbs"):
+
tdSql.execute("use db_tsbs")
-
+
# test interval and partition
- tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
+ tdSql.query(f"select avg(velocity) as mean_velocity ,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
parRows=tdSql.queryRows
- tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
+ tdSql.query(f"select avg(velocity) as mean_velocity ,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
tdSql.checkRows(parRows)
-
-
- # test insert into
+
+
+ # test insert into
        if insertinto:
- tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;")
- tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
-
- tdSql.query("insert into testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
+ tdSql.execute(f"create table {dbname}.testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;")
+ tdSql.query(f"insert into {dbname}.testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
+
+ tdSql.query(f"insert into {dbname}.testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
    # test partition interval fill
- tdSql.query("SELECT name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;")
+ tdSql.query(f"select name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;")
# test partition interval limit (PRcore-TD-17410)
- tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings partition BY name,driver,fleet interval (10m) limit 1);")
+ tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings partition BY name,driver,fleet interval (10m) limit 1);")
tdSql.checkRows(self.ctbNums)
# test partition interval Pseudo time-column
- tdSql.query("SELECT count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
+ tdSql.query(f"select count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
# 1 high-load:
- tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;")
+ tdSql.query(f"select ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity from {dbname}.diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;")
- tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")
+ tdSql.query(f"select ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity from {dbname}.diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")
- # 2 stationary-trucks
- tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)")
- tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name")
+ # 2 stationary-trucks
+ tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)")
+ tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name")
# 3 long-driving-sessions
- tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity FROM readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;")
+ tdSql.query(f"select name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity from {dbname}.readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;")
#4 long-daily-sessions
- tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT name,driver,avg(velocity) as mean_velocity FROM readings WHERE fleet ='West' AND ts > '2016-01-01T12:31:37Z' AND ts <= '2016-01-05T12:31:37Z' partition BY name,driver interval(10m) ) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 60")
+ tdSql.query(f"select name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT name,driver,avg(velocity) as mean_velocity from {dbname}.readings WHERE fleet ='West' AND ts > '2016-01-01T12:31:37Z' AND ts <= '2016-01-05T12:31:37Z' partition BY name,driver interval(10m) ) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 60")
# 5. avg-daily-driving-duration
- tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;")
+ tdSql.query(f"select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from {dbname}.readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;")
- # # 6. avg-daily-driving-session
+    # 6. avg-daily-driving-session
    # taosc core dumped
- tdSql.query(" SELECT _wstart as ts,name,floor(avg(velocity)/5) AS mv FROM readings WHERE name is not null AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0);")
- # tdSql.query("select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;")
- # tdSql.query("SELECT _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)")
+ tdSql.query(f"select _wstart as ts,name,floor(avg(velocity)/5) AS mv from {dbname}.readings WHERE name is not null AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0);")
+ # tdSql.query(f"select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;")
+ # tdSql.query(f"select _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)")
# 7. avg-load
- tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;")
+ tdSql.query(f"select fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml from {dbname}.diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;")
- # 8. daily-activity
- tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
+ # 8. daily-activity
+ tdSql.query(f"select model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
- tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
+ tdSql.query(f"select model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
- tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
+ tdSql.query(f"select _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
- tdSql.query("SELECT _wstart as ts,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
+ tdSql.query(f"select _wstart as ts,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
# 9. breakdown-frequency
    # NULL: count(NULL) returns 0 here, but 100 is expected
- tdSql.query("SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ")
+ tdSql.query(f"select model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ")
parRows=tdSql.queryRows
assert parRows != 0 , "query result is wrong, query rows %d but expect > 0 " %parRows
- tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")
- sql="select model,ctc from (SELECT model,count(state_changed) as ctc FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= 1451606400000 AND ts < 1451952001000 ) WHERE ts >= 1451606400000 AND ts < 1451952001000 partition BY model interval(10m)) partition BY model) WHERE state_changed = 1 partition BY model )where model is null;"
+ tdSql.query(f"select model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")
+ sql=f"select model,ctc from (SELECT model,count(state_changed) as ctc FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= 1451606400000 AND ts < 1451952001000 ) WHERE ts >= 1451606400000 AND ts < 1451952001000 partition BY model interval(10m)) partition BY model) WHERE state_changed = 1 partition BY model )where model is null;"
# for i in range(2):
# tdSql.query("%s"%sql)
- # quertR1=tdSql.queryResult
+ # quertR1=tdSql.queryResult
# for j in range(50):
# tdSql.query("%s"%sql)
# quertR2=tdSql.queryResult
- # assert quertR1 == quertR2 , "%s != %s ,The results of multiple queries are different" %(quertR1,quertR2)
+ # assert quertR1 == quertR2 , "%s != %s ,The results of multiple queries are different" %(quertR1,quertR2)
+
-
#it's already supported:
# last-loc
- tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;")
+ tdSql.query(f"select last_row(ts),latitude,longitude,name,driver from {dbname}.readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;")
#2. low-fuel
- tdSql.query("SELECT last_row(ts),name,driver,fuel_state,driver FROM diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;")
-
+ tdSql.query(f"select last_row(ts),name,driver,fuel_state,driver from {dbname}.diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;")
+
# 3. avg-vs-projected-fuel-consumption
- tdSql.query("select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from readings where velocity > 1 group by fleet")
-
- def run(self):
+ tdSql.query(f"select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from {dbname}.readings where velocity > 1 group by fleet")
+
+ def run(self):
tdLog.printNoPrefix("==========step1:create database and table,insert data ==============")
self.prepareData()
self.tsbsIotQuery()
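Several statements in this file mix an f-string with %-formatting, e.g. f" {dbName}.%s%d values " % (ctbPrefix, i). The f-string is evaluated first, substituting {dbName}, and the % operator then fills the remaining %s/%d placeholders:

```python
dbName, ctbPrefix, i = "db_tsbs", "rct", 3

# f-string substitution happens first, then %-formatting fills %s and %d.
sql = f" {dbName}.%s%d values " % (ctbPrefix, i)
assert sql == " db_tsbs.rct3 values "
```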
diff --git a/tests/system-test/2-query/ttl_comment.py b/tests/system-test/2-query/ttl_comment.py
index 33bd61b66c..c26393158c 100644
--- a/tests/system-test/2-query/ttl_comment.py
+++ b/tests/system-test/2-query/ttl_comment.py
@@ -26,20 +26,21 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
+ tdSql.init(conn.cursor(), False)
def run(self):
+ dbname="db"
tdSql.prepare()
- tdSql.error("create table ttl_table1(ts timestamp, i int) ttl 1.1")
- tdSql.error("create table ttl_table2(ts timestamp, i int) ttl 1e1")
- tdSql.error("create table ttl_table3(ts timestamp, i int) ttl -1")
+ tdSql.error(f"create table {dbname}.ttl_table1(ts timestamp, i int) ttl 1.1")
+ tdSql.error(f"create table {dbname}.ttl_table2(ts timestamp, i int) ttl 1e1")
+ tdSql.error(f"create table {dbname}.ttl_table3(ts timestamp, i int) ttl -1")
print("============== STEP 1 ===== test normal table")
- tdSql.execute("create table normal_table1(ts timestamp, i int)")
- tdSql.execute("create table normal_table2(ts timestamp, i int) comment '' ttl 3")
- tdSql.execute("create table normal_table3(ts timestamp, i int) ttl 2100000000020 comment 'hello'")
+ tdSql.execute(f"create table {dbname}.normal_table1(ts timestamp, i int)")
+ tdSql.execute(f"create table {dbname}.normal_table2(ts timestamp, i int) comment '' ttl 3")
+ tdSql.execute(f"create table {dbname}.normal_table3(ts timestamp, i int) ttl 2100000000020 comment 'hello'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'")
tdSql.checkData(0, 0, 'normal_table1')
@@ -58,32 +59,32 @@ class TDTestCase:
tdSql.checkData(0, 7, 2147483647)
tdSql.checkData(0, 8, 'hello')
- tdSql.execute("alter table normal_table1 comment 'nihao'")
+ tdSql.execute(f"alter table {dbname}.normal_table1 comment 'nihao'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'")
tdSql.checkData(0, 0, 'normal_table1')
tdSql.checkData(0, 8, 'nihao')
- tdSql.execute("alter table normal_table1 comment ''")
+ tdSql.execute(f"alter table {dbname}.normal_table1 comment ''")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'")
tdSql.checkData(0, 0, 'normal_table1')
tdSql.checkData(0, 8, '')
- tdSql.execute("alter table normal_table2 comment 'fly'")
+ tdSql.execute(f"alter table {dbname}.normal_table2 comment 'fly'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table2'")
tdSql.checkData(0, 0, 'normal_table2')
tdSql.checkData(0, 8, 'fly')
- tdSql.execute("alter table normal_table3 comment 'fly'")
+ tdSql.execute(f"alter table {dbname}.normal_table3 comment 'fly'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table3'")
tdSql.checkData(0, 0, 'normal_table3')
tdSql.checkData(0, 8, 'fly')
- tdSql.execute("alter table normal_table1 ttl 1")
+ tdSql.execute(f"alter table {dbname}.normal_table1 ttl 1")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'")
tdSql.checkData(0, 0, 'normal_table1')
tdSql.checkData(0, 7, 1)
- tdSql.execute("alter table normal_table3 ttl 0")
+ tdSql.execute(f"alter table {dbname}.normal_table3 ttl 0")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table3'")
tdSql.checkData(0, 0, 'normal_table3')
tdSql.checkData(0, 7, 0)
@@ -91,9 +92,9 @@ class TDTestCase:
print("============== STEP 2 ===== test super table")
- tdSql.execute("create table super_table1(ts timestamp, i int) tags(t int)")
- tdSql.execute("create table super_table2(ts timestamp, i int) tags(t int) comment ''")
- tdSql.execute("create table super_table3(ts timestamp, i int) tags(t int) comment 'super'")
+ tdSql.execute(f"create table {dbname}.super_table1(ts timestamp, i int) tags(t int)")
+ tdSql.execute(f"create table {dbname}.super_table2(ts timestamp, i int) tags(t int) comment ''")
+ tdSql.execute(f"create table {dbname}.super_table3(ts timestamp, i int) tags(t int) comment 'super'")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table1'")
tdSql.checkData(0, 0, 'super_table1')
@@ -110,32 +111,32 @@ class TDTestCase:
tdSql.checkData(0, 6, 'super')
- tdSql.execute("alter table super_table1 comment 'nihao'")
+ tdSql.execute(f"alter table {dbname}.super_table1 comment 'nihao'")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table1'")
tdSql.checkData(0, 0, 'super_table1')
tdSql.checkData(0, 6, 'nihao')
- tdSql.execute("alter table super_table1 comment ''")
+ tdSql.execute(f"alter table {dbname}.super_table1 comment ''")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table1'")
tdSql.checkData(0, 0, 'super_table1')
tdSql.checkData(0, 6, '')
- tdSql.execute("alter table super_table2 comment 'fly'")
+ tdSql.execute(f"alter table {dbname}.super_table2 comment 'fly'")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table2'")
tdSql.checkData(0, 0, 'super_table2')
tdSql.checkData(0, 6, 'fly')
- tdSql.execute("alter table super_table3 comment 'tdengine'")
+ tdSql.execute(f"alter table {dbname}.super_table3 comment 'tdengine'")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table3'")
tdSql.checkData(0, 0, 'super_table3')
tdSql.checkData(0, 6, 'tdengine')
print("============== STEP 3 ===== test child table")
- tdSql.execute("create table child_table1 using super_table1 tags(1) ttl 10")
- tdSql.execute("create table child_table2 using super_table1 tags(1) comment ''")
- tdSql.execute("create table child_table3 using super_table1 tags(1) comment 'child'")
- tdSql.execute("insert into child_table4 using super_table1 tags(1) values(now, 1)")
+ tdSql.execute(f"create table {dbname}.child_table1 using {dbname}.super_table1 tags(1) ttl 10")
+ tdSql.execute(f"create table {dbname}.child_table2 using {dbname}.super_table1 tags(1) comment ''")
+ tdSql.execute(f"create table {dbname}.child_table3 using {dbname}.super_table1 tags(1) comment 'child'")
+ tdSql.execute(f"insert into {dbname}.child_table4 using {dbname}.super_table1 tags(1) values(now, 1)")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'")
@@ -160,38 +161,38 @@ class TDTestCase:
tdSql.checkData(0, 8, None)
- tdSql.execute("alter table child_table1 comment 'nihao'")
+ tdSql.execute(f"alter table {dbname}.child_table1 comment 'nihao'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'")
tdSql.checkData(0, 0, 'child_table1')
tdSql.checkData(0, 8, 'nihao')
- tdSql.execute("alter table child_table1 comment ''")
+ tdSql.execute(f"alter table {dbname}.child_table1 comment ''")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'")
tdSql.checkData(0, 0, 'child_table1')
tdSql.checkData(0, 8, '')
- tdSql.execute("alter table child_table2 comment 'fly'")
+ tdSql.execute(f"alter table {dbname}.child_table2 comment 'fly'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table2'")
tdSql.checkData(0, 0, 'child_table2')
tdSql.checkData(0, 8, 'fly')
- tdSql.execute("alter table child_table3 comment 'tdengine'")
+ tdSql.execute(f"alter table {dbname}.child_table3 comment 'tdengine'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table3'")
tdSql.checkData(0, 0, 'child_table3')
tdSql.checkData(0, 8, 'tdengine')
- tdSql.execute("alter table child_table4 comment 'tdengine'")
+ tdSql.execute(f"alter table {dbname}.child_table4 comment 'tdengine'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table4'")
tdSql.checkData(0, 0, 'child_table4')
tdSql.checkData(0, 8, 'tdengine')
- tdSql.execute("alter table child_table4 ttl 9")
+ tdSql.execute(f"alter table {dbname}.child_table4 ttl 9")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table4'")
tdSql.checkData(0, 0, 'child_table4')
tdSql.checkData(0, 7, 9)
- tdSql.execute("alter table child_table3 ttl 9")
+ tdSql.execute(f"alter table {dbname}.child_table3 ttl 9")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table3'")
tdSql.checkData(0, 0, 'child_table3')
tdSql.checkData(0, 7, 9)
@@ -203,4 +204,3 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
-
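One detail worth noting in step 1 above: normal_table3 is created with ttl 2100000000020, which exceeds the 32-bit signed integer range, and the subsequent checkData(0, 7, 2147483647) shows the stored TTL coming back capped at INT32_MAX:

```python
INT32_MAX = 2**31 - 1                        # 2147483647

requested_ttl = 2100000000020                # from the normal_table3 create statement
stored_ttl = min(requested_ttl, INT32_MAX)   # the cap the test observes
assert stored_ttl == 2147483647
```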
diff --git a/tests/system-test/2-query/twa.py b/tests/system-test/2-query/twa.py
index 8281527bd4..62940477cf 100644
--- a/tests/system-test/2-query/twa.py
+++ b/tests/system-test/2-query/twa.py
@@ -7,10 +7,7 @@ import platform
import math
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
- "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
+    updatecfgDict = {"maxTablesPerVnode": 2, "minTablesPerVnode": 2, "tableIncStepPerVnode": 2}
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
@@ -21,46 +18,45 @@ class TDTestCase:
self.row_nums = 100
self.time_step = 1000
- def prepare_datas_of_distribute(self):
+ def prepare_datas_of_distribute(self, dbname="testdb"):
        # prepare data for 20 tables distributed across different vgroups
- tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
- tdSql.execute(" use testdb ")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
for i in range(self.tb_nums):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
ts = self.ts
for j in range(self.row_nums):
ts+=j*self.time_step
tdSql.execute(
- f"insert into ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
+ f"insert into {dbname}.ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
- tdSql.execute("insert into ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdLog.info(" prepare data for distributed_aggregate done! ")
- def twa_support_types(self):
- tdSql.query("desc stb1 ")
+ def twa_support_types(self, dbname="testdb"):
+ tdSql.query(f"desc {dbname}.stb1 ")
schema_list = tdSql.queryResult
for col_type in schema_list:
if col_type[1] in ["TINYINT" ,"SMALLINT","BIGINT" ,"INT","FLOAT","DOUBLE"]:
- tdSql.query(f" select twa({col_type[0]}) from stb1 partition by tbname ")
+ tdSql.query(f"select twa({col_type[0]}) from {dbname}.stb1 partition by tbname ")
else:
- tdSql.error(f" select twa({col_type[0]}) from stb1 partition by tbname ")
+ tdSql.error(f"select twa({col_type[0]}) from {dbname}.stb1 partition by tbname ")
- def check_distribute_datas(self):
+ def check_distribute_datas(self, dbname="testdb"):
# get vgroup_ids of all
- tdSql.query("show vgroups ")
+ tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
@@ -69,7 +65,7 @@ class TDTestCase:
vnode_tables[vgroup_id[0]]=[]
    # check the sub_tables per vnode to make sure they have been distributed
- tdSql.query(f"select * from information_schema.ins_tables where db_name = 'testdb' and table_name like 'ct%'")
+ tdSql.query(f"select * from information_schema.ins_tables where db_name = '{dbname}' and table_name like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
@@ -83,28 +79,28 @@ class TDTestCase:
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
- def distribute_twa_query(self):
+ def distribute_twa_query(self, dbname="testdb"):
# basic filter
- tdSql.query(" select twa(c1) from ct1 ")
+ tdSql.query(f"select twa(c1) from {dbname}.ct1 ")
tdSql.checkData(0,0,1.000000000)
- tdSql.query(" select twa(c1) from stb1 partition by tbname ")
+ tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,1.000000000)
- tdSql.query(" select twa(c2) from stb1 group by tbname ")
+ tdSql.query(f"select twa(c2) from {dbname}.stb1 group by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,11111.000000000)
- tdSql.query("select twa(c1+c2) from stb1 partition by tbname ")
+ tdSql.query(f"select twa(c1+c2) from {dbname}.stb1 partition by tbname ")
tdSql.checkData(0,0,11112.000000000)
- tdSql.query("select twa(c1) from stb1 partition by t1")
+ tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by t1")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,1.000000000)
# union all
- tdSql.query(" select twa(c1) from stb1 partition by tbname union all select twa(c1) from stb1 partition by tbname ")
+ tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by tbname union all select twa(c1) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(40)
tdSql.checkData(0,0,1.000000000)
@@ -112,26 +108,23 @@ class TDTestCase:
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
- tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
- tdSql.execute(" create table tb1 using st tags(1) ")
- tdSql.execute(" create table tb2 using st tags(2) ")
+ tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
+ tdSql.execute(" create table db.tb1 using db.st tags(1) ")
+ tdSql.execute(" create table db.tb2 using db.st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
- tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
- tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")
- tdSql.query(" select twa(tb1.c1), twa(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts ")
+ tdSql.query(f"select twa(tb1.c1), twa(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts ")
tdSql.checkRows(1)
tdSql.checkData(0,0,4.500000000)
tdSql.checkData(0,1,4.500000000)
- # group by
- tdSql.execute(" use testdb ")
-
# mixup with other functions
- tdSql.query(" select twa(c1),twa(c2),max(c1),elapsed(ts) from stb1 ")
+ tdSql.query(f"select twa(c1),twa(c2),max(c1),elapsed(ts) from {dbname}.stb1 ")
tdSql.checkData(0,0,1.000000000)
tdSql.checkData(0,1,11111.000000000)
tdSql.checkData(0,2,1)
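
The change running through this file, and through every test file below, is mechanical: each bare table reference gains a `{dbname}.` prefix so the SQL no longer depends on a prior `use` statement. A minimal sketch of the pattern, with a hypothetical `qualified` helper (the real tests inline the f-strings directly):

def qualified(dbname: str, table: str) -> str:
    # Hypothetical helper; the tests above write f"{dbname}.ct1" inline.
    return f"{dbname}.{table}"

# Before: "select twa(c1) from ct1"  (valid only after "use testdb")
# After:  fully qualified, independent of the session's current database.
sql = f"select twa(c1) from {qualified('testdb', 'ct1')}"
assert sql == "select twa(c1) from testdb.ct1"
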
diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py
index 88767ab888..4040bb71cb 100644
--- a/tests/system-test/2-query/union.py
+++ b/tests/system-test/2-query/union.py
@@ -58,10 +58,10 @@ class TDTestCase:
def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
table_reference = tb_list[0]
- join_condition = table_reference
+ join_condition = f'{table_reference} {table_reference.split(".")[-1]}'
join = "inner join" if INNER else "join"
for i in range(len(tb_list[1:])):
- join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
+ join_condition += f" {join} {tb_list[i+1]} {tb_list[i+1].split('.')[-1]} on {table_reference.split('.')[-1]}.{filter}={tb_list[i+1].split('.')[-1]}.{filter}"
return join_condition
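
With db-qualified inputs, the rewritten helper now aliases each table with its bare name so the on-clause can keep using short identifiers. A standalone reproduction of the logic above (PRIMARY_COL assumed to be `ts`):

def join_condition(tb_list, filter="ts", inner=False):
    ref = tb_list[0]
    cond = f'{ref} {ref.split(".")[-1]}'          # e.g. 'db.ct1 ct1'
    join = "inner join" if inner else "join"
    for tb in tb_list[1:]:
        cond += (f" {join} {tb} {tb.split('.')[-1]} "
                 f"on {ref.split('.')[-1]}.{filter}={tb.split('.')[-1]}.{filter}")
    return cond

# -> 'db.ct1 ct1 join db.t1 t1 on ct1.ts=t1.ts'
print(join_condition(["db.ct1", "db.t1"]))
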
@@ -76,7 +76,6 @@ class TDTestCase:
elif query_conditon.startswith("min"):
query_conditon = query_conditon[4:-1]
-
if query_conditon:
return f" where {query_conditon} is not null"
if col in NUM_COL:
@@ -108,10 +107,10 @@ class TDTestCase:
return f"select {select_clause} from {from_clause} {where_condition} {group_condition}"
@property
- def __join_tblist(self):
+ def __join_tblist(self, dbname="db"):
return [
- ["ct1", "t1"],
- ["ct4", "t1"],
+ [f"{dbname}.ct1", f"{dbname}.t1"],
+ [f"{dbname}.ct4", f"{dbname}.t1"],
# ["ct1", "ct2", "ct4"],
# ["ct1", "ct2", "t1"],
# ["ct1", "ct4", "t1"],
@@ -120,10 +119,10 @@ class TDTestCase:
]
@property
- def __tb_liast(self):
+ def __tb_list(self, dbname="db"):
return [
- "ct1",
- "ct4",
+ f"{dbname}.ct1",
+ f"{dbname}.ct4",
]
def sql_list(self):
@@ -131,7 +130,8 @@ class TDTestCase:
__join_tblist = self.__join_tblist
for join_tblist in __join_tblist:
for join_tb in join_tblist:
- select_claus_list = self.__query_condition(join_tb)
+ join_tb_name = join_tb.split(".")[-1]
+ select_claus_list = self.__query_condition(join_tb_name)
for select_claus in select_claus_list:
group_claus = self.__group_condition( col=select_claus)
where_claus = self.__where_condition(query_conditon=select_claus)
@@ -141,9 +141,10 @@ class TDTestCase:
self.__single_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
)
)
- __no_join_tblist = self.__tb_liast
+ __no_join_tblist = self.__tb_list
for tb in __no_join_tblist:
- select_claus_list = self.__query_condition(tb)
+ tb_name = tb.split(".")[-1]
+ select_claus_list = self.__query_condition(tb_name)
for select_claus in select_claus_list:
group_claus = self.__group_condition(col=select_claus)
where_claus = self.__where_condition(query_conditon=select_claus)
@@ -230,31 +231,29 @@ class TDTestCase:
else:
tdSql.error(f"{sqls[i]} union {sqls[j+i]}")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
- tdSql.error( "show tables union show tables" )
- tdSql.error( "create table errtb1 union all create table errtb2" )
- tdSql.error( "drop table ct1 union all drop table ct3" )
- tdSql.error( "select c1 from ct1 union all drop table ct3" )
- tdSql.error( "select c1 from ct1 union all '' " )
- tdSql.error( " '' union all select c1 from ct1 " )
- # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ")
+ tdSql.error( f"show {dbname}.tables union show {dbname}.tables" )
+ tdSql.error( f"create table {dbname}.errtb1 union all create table {dbname}.errtb2" )
+ tdSql.error( f"drop table {dbname}.ct1 union all drop table {dbname}.ct3" )
+ tdSql.error( f"select c1 from {dbname}.ct1 union all drop table {dbname}.ct3" )
+ tdSql.error( f"select c1 from {dbname}.ct1 union all '' " )
+ tdSql.error( f" '' union all select c1 from {dbname}.ct1 " )
def all_test(self):
self.__test_error()
self.union_check()
-
- def __create_tb(self):
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- ) tags (t1 int)
+ ) tags (tag1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -264,30 +263,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
- { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -303,7 +301,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -319,13 +317,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -341,7 +339,6 @@ class TDTestCase:
'''
)
-
def run(self):
tdSql.prepare()
@@ -355,8 +352,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
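
Here, as in upper.py further down, the dnode stop/start pair is replaced by `flush database`, which forces the same memtable-to-disk write-back the restart existed to provoke, without tearing down the process. The resulting two-pass shape of these run() methods, sketched (tdSql comes from util.sql, as in the tests):

def run_with_flush(self, dbname="db"):
    self.all_test()                            # pass 1: data served from memory
    tdSql.execute(f"flush database {dbname}")  # persist in-memory rows to disk
    tdSql.execute(f"use {dbname}")
    self.all_test()                            # pass 2: same checks against disk
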
diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py
index ccf7e287e2..ec77cbbcdc 100644
--- a/tests/system-test/2-query/unique.py
+++ b/tests/system-test/2-query/unique.py
@@ -11,49 +11,46 @@ from util.sql import *
from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -69,84 +66,84 @@ class TDTestCase:
'''
)
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select unique from t1",
- "select unique(123--123)==1 from t1",
- "select unique(123,123) from t1",
- "select unique(c1,ts) from t1",
- "select unique(c1,c1,ts) from t1",
- "select unique(c1) as 'd1' from t1",
- "select unique(c1 ,c2 ) from t1",
- "select unique(c1 ,NULL) from t1",
- "select unique(,) from t1;",
- "select unique(floor(c1) ab from t1)",
- "select unique(c1) as int from t1",
- "select unique('c1') from t1",
- "select unique(NULL) from t1",
- "select unique('') from t1",
- "select unique(c%) from t1",
- "select unique(t1) from t1",
- "select unique(True) from t1",
- "select unique(c1) , count(c1) from t1",
- "select unique(c1) , avg(c1) from t1",
- "select unique(c1) , min(c1) from t1",
- "select unique(c1) , spread(c1) from t1",
- "select unique(c1) , diff(c1) from t1",
- #"select unique(c1) , abs(c1) from t1", # support
- #"select unique(c1) , c1 from t1",
- "select unique from stb1 partition by tbname",
- "select unique(123--123)==1 from stb1 partition by tbname",
- "select unique(123) from stb1 partition by tbname",
- "select unique(c1,ts) from stb1 partition by tbname",
- "select unique(c1,c1,ts) from stb1 partition by tbname",
- "select unique(c1) as 'd1' from stb1 partition by tbname",
- "select unique(c1 ,c2 ) from stb1 partition by tbname",
- "select unique(c1 ,NULL) from stb1 partition by tbname",
- "select unique(,) from stb1 partition by tbname;",
- #"select unique(floor(c1) ab from stb1 partition by tbname)", # support
- #"select unique(c1) as int from stb1 partition by tbname",
- "select unique('c1') from stb1 partition by tbname",
- "select unique(NULL) from stb1 partition by tbname",
- "select unique('') from stb1 partition by tbname",
- "select unique(c%) from stb1 partition by tbname",
- #"select unique(t1) from stb1 partition by tbname", # support
- "select unique(True) from stb1 partition by tbname",
- "select unique(c1) , count(c1) from stb1 partition by tbname",
- "select unique(c1) , avg(c1) from stb1 partition by tbname",
- "select unique(c1) , min(c1) from stb1 partition by tbname",
- "select unique(c1) , spread(c1) from stb1 partition by tbname",
- "select unique(c1) , diff(c1) from stb1 partition by tbname",
- #"select unique(c1) , abs(c1) from stb1 partition by tbname", # support
- #"select unique(c1) , c1 from stb1 partition by tbname" # support
+ f"select unique from {dbname}.t1",
+ f"select unique(123--123)==1 from {dbname}.t1",
+ f"select unique(123,123) from {dbname}.t1",
+ f"select unique(c1,ts) from {dbname}.t1",
+ f"select unique(c1,c1,ts) from {dbname}.t1",
+ f"select unique(c1) as 'd1' from {dbname}.t1",
+ f"select unique(c1 ,c2 ) from {dbname}.t1",
+ f"select unique(c1 ,NULL) from {dbname}.t1",
+ f"select unique(,) from {dbname}.t1;",
+ f"select unique(floor(c1) ab from {dbname}.t1)",
+ f"select unique(c1) as int from {dbname}.t1",
+ f"select unique('c1') from {dbname}.t1",
+ f"select unique(NULL) from {dbname}.t1",
+ f"select unique('') from {dbname}.t1",
+ f"select unique(c%) from {dbname}.t1",
+ f"select unique(t1) from {dbname}.t1",
+ f"select unique(True) from {dbname}.t1",
+ f"select unique(c1) , count(c1) from {dbname}.t1",
+ f"select unique(c1) , avg(c1) from {dbname}.t1",
+ f"select unique(c1) , min(c1) from {dbname}.t1",
+ f"select unique(c1) , spread(c1) from {dbname}.t1",
+ f"select unique(c1) , diff(c1) from {dbname}.t1",
+ #f"select unique(c1) , abs(c1) from {dbname}.t1", # support
+ #f"select unique(c1) , c1 from {dbname}.t1",
+ f"select unique from {dbname}.stb1 partition by tbname",
+ f"select unique(123--123)==1 from {dbname}.stb1 partition by tbname",
+ f"select unique(123) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1,ts) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1,c1,ts) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) as 'd1' from {dbname}.stb1 partition by tbname",
+ f"select unique(c1 ,c2 ) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1 ,NULL) from {dbname}.stb1 partition by tbname",
+ f"select unique(,) from {dbname}.stb1 partition by tbname;",
+ #f"select unique(floor(c1) ab from {dbname}.stb1 partition by tbname)", # support
+ #f"select unique(c1) as int from {dbname}.stb1 partition by tbname",
+ f"select unique('c1') from {dbname}.stb1 partition by tbname",
+ f"select unique(NULL) from {dbname}.stb1 partition by tbname",
+ f"select unique('') from {dbname}.stb1 partition by tbname",
+ f"select unique(c%) from {dbname}.stb1 partition by tbname",
+ #f"select unique(t1) from {dbname}.stb1 partition by tbname", # support
+ f"select unique(True) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , count(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , avg(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , min(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , spread(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , diff(c1) from {dbname}.stb1 partition by tbname",
+ #f"select unique(c1) , abs(c1) from {dbname}.stb1 partition by tbname", # support
+ #f"select unique(c1) , c1 from {dbname}.stb1 partition by tbname" # support
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
pass
- def support_types(self):
+ def support_types(self, dbname="db"):
other_no_value_types = [
- "select unique(ts) from t1" ,
- "select unique(c7) from t1",
- "select unique(c8) from t1",
- "select unique(c9) from t1",
- "select unique(ts) from ct1" ,
- "select unique(c7) from ct1",
- "select unique(c8) from ct1",
- "select unique(c9) from ct1",
- "select unique(ts) from ct3" ,
- "select unique(c7) from ct3",
- "select unique(c8) from ct3",
- "select unique(c9) from ct3",
- "select unique(ts) from ct4" ,
- "select unique(c7) from ct4",
- "select unique(c8) from ct4",
- "select unique(c9) from ct4",
- "select unique(ts) from stb1 partition by tbname" ,
- "select unique(c7) from stb1 partition by tbname",
- "select unique(c8) from stb1 partition by tbname",
- "select unique(c9) from stb1 partition by tbname"
+ f"select unique(ts) from {dbname}.t1" ,
+ f"select unique(c7) from {dbname}.t1",
+ f"select unique(c8) from {dbname}.t1",
+ f"select unique(c9) from {dbname}.t1",
+ f"select unique(ts) from {dbname}.ct1" ,
+ f"select unique(c7) from {dbname}.ct1",
+ f"select unique(c8) from {dbname}.ct1",
+ f"select unique(c9) from {dbname}.ct1",
+ f"select unique(ts) from {dbname}.ct3" ,
+ f"select unique(c7) from {dbname}.ct3",
+ f"select unique(c8) from {dbname}.ct3",
+ f"select unique(c9) from {dbname}.ct3",
+ f"select unique(ts) from {dbname}.ct4" ,
+ f"select unique(c7) from {dbname}.ct4",
+ f"select unique(c8) from {dbname}.ct4",
+ f"select unique(c9) from {dbname}.ct4",
+ f"select unique(ts) from {dbname}.stb1 partition by tbname" ,
+ f"select unique(c7) from {dbname}.stb1 partition by tbname",
+ f"select unique(c8) from {dbname}.stb1 partition by tbname",
+ f"select unique(c9) from {dbname}.stb1 partition by tbname"
]
for type_sql in other_no_value_types:
@@ -154,43 +151,43 @@ class TDTestCase:
tdLog.info("support type ok , sql is : %s"%type_sql)
type_sql_lists = [
- "select unique(c1) from t1",
- "select unique(c2) from t1",
- "select unique(c3) from t1",
- "select unique(c4) from t1",
- "select unique(c5) from t1",
- "select unique(c6) from t1",
+ f"select unique(c1) from {dbname}.t1",
+ f"select unique(c2) from {dbname}.t1",
+ f"select unique(c3) from {dbname}.t1",
+ f"select unique(c4) from {dbname}.t1",
+ f"select unique(c5) from {dbname}.t1",
+ f"select unique(c6) from {dbname}.t1",
- "select unique(c1) from ct1",
- "select unique(c2) from ct1",
- "select unique(c3) from ct1",
- "select unique(c4) from ct1",
- "select unique(c5) from ct1",
- "select unique(c6) from ct1",
+ f"select unique(c1) from {dbname}.ct1",
+ f"select unique(c2) from {dbname}.ct1",
+ f"select unique(c3) from {dbname}.ct1",
+ f"select unique(c4) from {dbname}.ct1",
+ f"select unique(c5) from {dbname}.ct1",
+ f"select unique(c6) from {dbname}.ct1",
- "select unique(c1) from ct3",
- "select unique(c2) from ct3",
- "select unique(c3) from ct3",
- "select unique(c4) from ct3",
- "select unique(c5) from ct3",
- "select unique(c6) from ct3",
+ f"select unique(c1) from {dbname}.ct3",
+ f"select unique(c2) from {dbname}.ct3",
+ f"select unique(c3) from {dbname}.ct3",
+ f"select unique(c4) from {dbname}.ct3",
+ f"select unique(c5) from {dbname}.ct3",
+ f"select unique(c6) from {dbname}.ct3",
- "select unique(c1) from stb1 partition by tbname",
- "select unique(c2) from stb1 partition by tbname",
- "select unique(c3) from stb1 partition by tbname",
- "select unique(c4) from stb1 partition by tbname",
- "select unique(c5) from stb1 partition by tbname",
- "select unique(c6) from stb1 partition by tbname",
+ f"select unique(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c2) from {dbname}.stb1 partition by tbname",
+ f"select unique(c3) from {dbname}.stb1 partition by tbname",
+ f"select unique(c4) from {dbname}.stb1 partition by tbname",
+ f"select unique(c5) from {dbname}.stb1 partition by tbname",
+ f"select unique(c6) from {dbname}.stb1 partition by tbname",
- "select unique(c6) as alisb from stb1 partition by tbname",
- "select unique(c6) alisb from stb1 partition by tbname",
+ f"select unique(c6) as alisb from {dbname}.stb1 partition by tbname",
+ f"select unique(c6) alisb from {dbname}.stb1 partition by tbname",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
def check_unique_table(self , unique_sql):
- # unique_sql = "select unique(c1) from ct1"
+ # unique_sql = f"select unique(c1) from {dbname}.ct1"
origin_sql = unique_sql.replace("unique(","").replace(")","")
tdSql.query(unique_sql)
unique_result = tdSql.queryResult
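
The comparison that follows (elided by the hunk) checks the server's unique() output against the distinct values of the raw column; a rough Python-side equivalent of the expected result, assuming first-occurrence semantics:

def expected_unique(rows):
    # Keep the first occurrence of each value, preserving arrival order.
    seen, out = set(), []
    for (value,) in rows:
        if value not in seen:
            seen.add(value)
            out.append(value)
    return out
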
@@ -219,83 +216,83 @@ class TDTestCase:
else:
tdLog.exit(" unique query check fail , unique sql is: %s " %unique_sql)
- def basic_unique_function(self):
+ def basic_unique_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select unique(c1) from ct3")
+ tdSql.query(f"select unique(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c2) from ct3")
+ tdSql.query(f"select unique(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c3) from ct3")
+ tdSql.query(f"select unique(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c4) from ct3")
+ tdSql.query(f"select unique(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c5) from ct3")
+ tdSql.query(f"select unique(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c6) from ct3")
+ tdSql.query(f"select unique(c6) from {dbname}.ct3")
# will support _rowts mix with
- # tdSql.query("select unique(c6),_rowts from ct3")
+ # tdSql.query(f"select unique(c6),_rowts from {dbname}.ct3")
# auto check for t1 table
# used for regular table
- tdSql.query("select unique(c1) from t1")
+ tdSql.query(f"select unique(c1) from {dbname}.t1")
- tdSql.query("desc t1")
+ tdSql.query(f"desc {dbname}.t1")
col_lists_rows = tdSql.queryResult
col_lists = []
for col_name in col_lists_rows:
col_lists.append(col_name[0])
for col in col_lists:
- self.check_unique_table(f"select unique({col}) from t1")
+ self.check_unique_table(f"select unique({col}) from {dbname}.t1")
# unique with super tags
- tdSql.query("select unique(c1) from ct1")
+ tdSql.query(f"select unique(c1) from {dbname}.ct1")
tdSql.checkRows(10)
- tdSql.query("select unique(c1) from ct4")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4")
tdSql.checkRows(10)
- #tdSql.error("select unique(c1),tbname from ct1") #support
- #tdSql.error("select unique(c1),t1 from ct1") #support
+ #tdSql.error(f"select unique(c1),tbname from {dbname}.ct1") #support
+ #tdSql.error(f"select unique(c1),t1 from {dbname}.ct1") #support
# unique with common col
- #tdSql.error("select unique(c1) ,ts from ct1")
- #tdSql.error("select unique(c1) ,c1 from ct1")
+ #tdSql.error(f"select unique(c1) ,ts from {dbname}.ct1")
+ #tdSql.error(f"select unique(c1) ,c1 from {dbname}.ct1")
# unique with scalar function
- #tdSql.error("select unique(c1) ,abs(c1) from ct1")
- tdSql.error("select unique(c1) , unique(c2) from ct1")
- #tdSql.error("select unique(c1) , abs(c2)+2 from ct1")
+ #tdSql.error(f"select unique(c1) ,abs(c1) from {dbname}.ct1")
+ tdSql.error(f"select unique(c1) , unique(c2) from {dbname}.ct1")
+ #tdSql.error(f"select unique(c1) , abs(c2)+2 from {dbname}.ct1")
# unique with aggregate function
- tdSql.error("select unique(c1) ,sum(c1) from ct1")
- tdSql.error("select unique(c1) ,max(c1) from ct1")
- tdSql.error("select unique(c1) ,csum(c1) from ct1")
- tdSql.error("select unique(c1) ,count(c1) from ct1")
+ tdSql.error(f"select unique(c1) ,sum(c1) from {dbname}.ct1")
+ tdSql.error(f"select unique(c1) ,max(c1) from {dbname}.ct1")
+ tdSql.error(f"select unique(c1) ,csum(c1) from {dbname}.ct1")
+ tdSql.error(f"select unique(c1) ,count(c1) from {dbname}.ct1")
# unique with filter where
- tdSql.query("select unique(c1) from ct4 where c1 is null")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 where c1 is null")
tdSql.checkData(0, 0, None)
- tdSql.query("select unique(c1) from ct4 where c1 >2 order by 1")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 where c1 >2 order by 1")
tdSql.checkData(0, 0, 3)
tdSql.checkData(1, 0, 4)
tdSql.checkData(2, 0, 5)
tdSql.checkData(5, 0, 8)
- tdSql.query("select unique(c1) from ct4 where c2 between 0 and 99999 order by 1 desc")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 where c2 between 0 and 99999 order by 1 desc")
tdSql.checkData(0, 0, 8)
tdSql.checkData(1, 0, 7)
tdSql.checkData(2, 0, 6)
@@ -307,43 +304,43 @@ class TDTestCase:
tdSql.checkData(8, 0, 0)
# unique with union all
- tdSql.query("select unique(c1) from ct4 union all select c1 from ct1")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 union all select c1 from {dbname}.ct1")
tdSql.checkRows(23)
- tdSql.query("select unique(c1) from ct4 union all select distinct(c1) from ct4")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 union all select distinct(c1) from {dbname}.ct4")
tdSql.checkRows(20)
- tdSql.query("select unique(c2) from ct4 union all select abs(c2)/2 from ct4")
+ tdSql.query(f"select unique(c2) from {dbname}.ct4 union all select abs(c2)/2 from {dbname}.ct4")
tdSql.checkRows(22)
# unique with join
# prepare join datas with same ts
tdSql.execute(" use db ")
- tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table tb1 using st1 tags(1)")
- tdSql.execute(" create table tb2 using st1 tags(2)")
+ tdSql.execute(" create stable db.st1 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(" create table db.tb1 using db.st1 tags(1)")
+ tdSql.execute(" create table db.tb2 using db.st1 tags(2)")
- tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table ttb1 using st2 tags(1)")
- tdSql.execute(" create table ttb2 using st2 tags(2)")
+ tdSql.execute(" create stable db.st2 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(" create table db.ttb1 using db.st2 tags(1)")
+ tdSql.execute(" create table db.ttb2 using db.st2 tags(2)")
start_ts = 1622369635000 # 2021-05-30 18:13:55
for i in range(10):
ts_value = start_ts+i*1000
- tdSql.execute(f" insert into tb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into tb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , {i})")
- tdSql.query("select unique(tb2.num) from tb1, tb2 where tb1.ts=tb2.ts order by 1")
+ tdSql.query(f"select unique(tb2.num) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts order by 1")
tdSql.checkRows(10)
tdSql.checkData(0,0,0)
tdSql.checkData(1,0,1)
tdSql.checkData(2,0,2)
tdSql.checkData(9,0,9)
- tdSql.query("select unique(tb2.num) from tb1, tb2 where tb1.ts=tb2.ts union all select unique(tb1.num) from tb1, tb2 where tb1.ts=tb2.ts order by 1")
+ tdSql.query(f"select unique(tb2.num) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts union all select unique(tb1.num) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts order by 1")
tdSql.checkRows(20)
tdSql.checkData(0,0,0)
tdSql.checkData(2,0,1)
@@ -351,23 +348,23 @@ class TDTestCase:
tdSql.checkData(18,0,9)
# nest query
- # tdSql.query("select unique(c1) from (select c1 from ct1)")
- tdSql.query("select c1 from (select unique(c1) c1 from ct4) order by 1 desc nulls first")
+ # tdSql.query(f"select unique(c1) from (select c1 from {dbname}.ct1)")
+ tdSql.query(f"select c1 from (select unique(c1) c1 from {dbname}.ct4) order by 1 desc nulls first")
tdSql.checkRows(10)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 8)
tdSql.checkData(9, 0, 0)
- tdSql.query("select sum(c1) from (select unique(c1) c1 from ct1)")
+ tdSql.query(f"select sum(c1) from (select unique(c1) c1 from {dbname}.ct1)")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 45)
- tdSql.query("select sum(c1) from (select distinct(c1) c1 from ct1) union all select sum(c1) from (select unique(c1) c1 from ct1)")
+ tdSql.query(f"select sum(c1) from (select distinct(c1) c1 from {dbname}.ct1) union all select sum(c1) from (select unique(c1) c1 from {dbname}.ct1)")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 45)
tdSql.checkData(1, 0, 45)
- tdSql.query("select 1-abs(c1) from (select unique(c1) c1 from ct4) order by 1 nulls first")
+ tdSql.query(f"select 1-abs(c1) from (select unique(c1) c1 from {dbname}.ct4) order by 1 nulls first")
tdSql.checkRows(10)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, -7.000000000)
@@ -375,104 +372,103 @@ class TDTestCase:
# bug for stable
#partition by tbname
- # tdSql.query(" select unique(c1) from stb1 partition by tbname ")
+ # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ")
# tdSql.checkRows(21)
- # tdSql.query(" select unique(c1) from stb1 partition by tbname ")
+ # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ")
# tdSql.checkRows(21)
# group by
- tdSql.error("select unique(c1) from ct1 group by c1")
- tdSql.error("select unique(c1) from ct1 group by tbname")
+ tdSql.error(f"select unique(c1) from {dbname}.ct1 group by c1")
+ tdSql.error(f"select unique(c1) from {dbname}.ct1 group by tbname")
# super table
# super table
- tdSql.error("select tbname , tail(c1,2) from stb1 group by tbname")
- tdSql.query("select tail(c1,2) from stb1 partition by tbname")
+ tdSql.error(f"select tbname , tail(c1,2) from {dbname}.stb1 group by tbname")
+ tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
# bug need fix
- # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname")
# tdSql.checkRows(4)
- # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname order by tbname")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname order by tbname")
# tdSql.checkRows(4)
- # tdSql.query(" select tbname , count(c1) from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , count(c1) from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , max(c1) ,c1 from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname ,first(c1) from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname ,first(c1) from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- tdSql.query("select tail(c1,2) from stb1 partition by tbname")
+ tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
# # bug need fix
- # tdSql.query(" select tbname , unique(c1) from stb1 where t1 = 0 partition by tbname ")
+ # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where t1 = 0 partition by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , unique(c1) from stb1 where t1 = 0 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where t1 = 0 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , unique(c1) from stb1 where c1 = 0 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where c1 = 0 partition by tbname order by tbname ")
# tdSql.checkRows(3)
- # tdSql.query(" select tbname , unique(c1) from stb1 where c1 = 0 partition by tbname ")
+ # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where c1 = 0 partition by tbname ")
# tdSql.checkRows(3)
- tdSql.query(" select unique(t1) from stb1 ")
+ tdSql.query(f"select unique(t1) from {dbname}.stb1 ")
tdSql.checkRows(2)
- tdSql.query(" select unique(t1+c1) from stb1 ")
+ tdSql.query(f"select unique(t1+c1) from {dbname}.stb1 ")
tdSql.checkRows(13)
- tdSql.query(" select unique(t1+c1) from stb1 partition by tbname ")
+ tdSql.query(f"select unique(t1+c1) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(20)
- tdSql.query(" select unique(t1) from stb1 partition by tbname ")
+ tdSql.query(f"select unique(t1) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(2)
# nest query
- tdSql.query(" select unique(c1) from (select _rowts , t1 ,c1 , tbname from stb1 ) ")
+ tdSql.query(f"select unique(c1) from (select _rowts , t1 ,c1 , tbname from {dbname}.stb1 ) ")
tdSql.checkRows(11)
tdSql.checkData(0,0,6)
tdSql.checkData(10,0,3)
- tdSql.query("select unique(t1) from (select _rowts , t1 , tbname from stb1 )")
+ tdSql.query(f"select unique(t1) from (select _rowts , t1 , tbname from {dbname}.stb1 )")
tdSql.checkRows(2)
tdSql.checkData(0,0,4)
tdSql.checkData(1,0,1)
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()+1s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()+2s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+2s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()+3s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+3s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- tdSql.query("select unique(c2) from sub1_bound order by 1 desc")
+ tdSql.query(f"select unique(c2) from {dbname}.sub1_bound order by 1 desc")
tdSql.checkRows(5)
tdSql.checkData(0,0,9223372036854775807)
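
check_boundary_values drives each numeric column to the edge of its type and expects the one-past-the-edge insert to fail. For orientation, the limits exercised by the sub1_bound inserts above:

INT32_MAX = 2**31 - 1   # 2147483647          -> c1 (int)
INT64_MAX = 2**63 - 1   # 9223372036854775807 -> c2 (bigint)
INT16_MAX = 2**15 - 1   # 32767               -> c3 (smallint)
INT8_MAX  = 2**7 - 1    # 127                 -> c4 (tinyint)
# The tdSql.error() statement uses INT32_MAX+1, INT64_MAX+1, 32768 and 128,
# each one past its column's range, so the insert must be rejected.
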
diff --git a/tests/system-test/2-query/upper.py b/tests/system-test/2-query/upper.py
index bb485161dd..f15a6f3ba7 100644
--- a/tests/system-test/2-query/upper.py
+++ b/tests/system-test/2-query/upper.py
@@ -95,16 +95,16 @@ class TDTestCase:
return sqls
- def __test_current(self):
+ def __test_current(self, dbname="db"):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__upper_current_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__upper_err_check(tb):
@@ -112,22 +112,20 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
-
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- ) tags (t1 int)
+ ) tags (tag1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -137,83 +135,82 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
- ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } )
- ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } )
+ f'''insert into {dbname}.ct1 values
+ ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
+ ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
- { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000}
+ { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
- { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000}
+ { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
- { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
- { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
- "binary_{i}", "nchar_{i}", { now_time - 1000 * i } )
+ "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
- "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
- "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
-
def run(self):
tdSql.prepare()
@@ -226,8 +223,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/varchar.py b/tests/system-test/2-query/varchar.py
index 5cc6c8e399..17c3ea6333 100644
--- a/tests/system-test/2-query/varchar.py
+++ b/tests/system-test/2-query/varchar.py
@@ -14,43 +14,44 @@ class TDTestCase:
tdSql.init(conn.cursor())
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
+ dbname = "db"
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 varchar(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 varchar(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
tdLog.printNoPrefix("==========step2:insert data")
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'varchar0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'varchar9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'varchar0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'varchar9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "varchar1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "varchar2", "nchar2", now()+2a )
@@ -70,7 +71,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3: cast on varchar")
- tdSql.query("select c8 from ct1")
+ tdSql.query(f"select c8 from {dbname}.ct1")
for i in range(tdSql.queryRows):
tdSql.checkData(i,0, data_ct1_c8[i])
diff --git a/tests/system-test/7-tmq/tmqShow.py b/tests/system-test/7-tmq/tmqShow.py
index 6f8183bf06..c0f33d9204 100644
--- a/tests/system-test/7-tmq/tmqShow.py
+++ b/tests/system-test/7-tmq/tmqShow.py
@@ -19,6 +19,11 @@ class TDTestCase:
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
+ def insertConsumerInfo(self, consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifmanualcommit, offset=1, cdbName='cdb'):
+ # local variant of tmqCom.insertConsumerInfo: staggers the first-column
+ # timestamp by `offset` seconds so back-to-back registrations get unique keys
+ sql = "insert into %s.consumeinfo values "%cdbName
+ sql += "(now+%ds, %d, '%s', '%s', %d, %d, %d)"%(offset, consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
+ tdLog.info("consume info sql: %s"%sql)
+ tdSql.query(sql)
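
The four registrations in tmqCase1 below pass increasing offsets by hand; a loop-form equivalent, rebuilding keyList per consumer group exactly as the explicit calls do (a sketch reusing tmqCase1's local variables):

for idx in range(4):
    keyList = ('group.id:%s, enable.auto.commit:false, '
               'auto.commit.interval.ms:6000, auto.offset.reset:earliest'
               % consumeGroupIdList[idx])
    self.insertConsumerInfo(consumerIdList[idx], expectrowcnt, topicNameList[idx],
                            keyList, ifcheckdata, ifManualCommit, offset=idx + 1)
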
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
@@ -95,19 +100,23 @@ class TDTestCase:
ifcheckdata = 0
ifManualCommit = 0
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[0]
- tmqCom.insertConsumerInfo(consumerIdList[0], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+ tsOffset=1
+ self.insertConsumerInfo(consumerIdList[0], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit,tsOffset)
topicList = topicNameList[1]
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[1]
- tmqCom.insertConsumerInfo(consumerIdList[1], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+ tsOffset=2
+ self.insertConsumerInfo(consumerIdList[1], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit,tsOffset)
topicList = topicNameList[2]
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[2]
- tmqCom.insertConsumerInfo(consumerIdList[2], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+ tsOffset=3
+ self.insertConsumerInfo(consumerIdList[2], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit,tsOffset)
topicList = topicNameList[3]
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[3]
- tmqCom.insertConsumerInfo(consumerIdList[3], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+ tsOffset=4
+ self.insertConsumerInfo(consumerIdList[3], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit,tsOffset)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index 1e958bdb29..4305ceff56 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -124,44 +124,97 @@ python3 ./test.py -f 2-query/leastsquares.py
python3 ./test.py -f 2-query/leastsquares.py -R
python3 ./test.py -f 2-query/length.py
python3 ./test.py -f 2-query/length.py -R
+python3 ./test.py -f 2-query/log.py
+# python3 ./test.py -f 2-query/log.py -R
+python3 ./test.py -f 2-query/lower.py
+python3 ./test.py -f 2-query/lower.py -R
+python3 ./test.py -f 2-query/ltrim.py
+python3 ./test.py -f 2-query/ltrim.py -R
+python3 ./test.py -f 2-query/mavg.py
+python3 ./test.py -f 2-query/mavg.py -R
+python3 ./test.py -f 2-query/max_partition.py
+python3 ./test.py -f 2-query/max_partition.py -R
+python3 ./test.py -f 2-query/max.py
+python3 ./test.py -f 2-query/max.py -R
+python3 ./test.py -f 2-query/min.py
+python3 ./test.py -f 2-query/min.py -R
+python3 ./test.py -f 2-query/Now.py
+python3 ./test.py -f 2-query/Now.py -R
+python3 ./test.py -f 2-query/percentile.py
+python3 ./test.py -f 2-query/percentile.py -R
+python3 ./test.py -f 2-query/pow.py
+python3 ./test.py -f 2-query/pow.py -R
+python3 ./test.py -f 2-query/query_cols_tags_and_or.py
+python3 ./test.py -f 2-query/query_cols_tags_and_or.py -R
+python3 ./test.py -f 2-query/round.py
+python3 ./test.py -f 2-query/round.py -R
+python3 ./test.py -f 2-query/rtrim.py
+python3 ./test.py -f 2-query/rtrim.py -R
+python3 ./test.py -f 2-query/sample.py
+python3 ./test.py -f 2-query/sample.py -R
+python3 ./test.py -f 2-query/sin.py
+python3 ./test.py -f 2-query/sin.py -R
+python3 ./test.py -f 2-query/smaTest.py
+python3 ./test.py -f 2-query/smaTest.py -R
+python3 ./test.py -f 2-query/sml.py
+python3 ./test.py -f 2-query/sml.py -R
+python3 ./test.py -f 2-query/spread.py
+python3 ./test.py -f 2-query/spread.py -R
+python3 ./test.py -f 2-query/sqrt.py
+python3 ./test.py -f 2-query/sqrt.py -R
+python3 ./test.py -f 2-query/statecount.py
+python3 ./test.py -f 2-query/statecount.py -R
+python3 ./test.py -f 2-query/stateduration.py
+python3 ./test.py -f 2-query/stateduration.py -R
+python3 ./test.py -f 2-query/substr.py
+python3 ./test.py -f 2-query/substr.py -R
+python3 ./test.py -f 2-query/sum.py
+python3 ./test.py -f 2-query/sum.py -R
+python3 ./test.py -f 2-query/tail.py
+python3 ./test.py -f 2-query/tail.py -R
+python3 ./test.py -f 2-query/tan.py
+# python3 ./test.py -f 2-query/tan.py -R
+python3 ./test.py -f 2-query/Timediff.py
+python3 ./test.py -f 2-query/Timediff.py -R
+python3 ./test.py -f 2-query/timetruncate.py
+# python3 ./test.py -f 2-query/timetruncate.py -R
+python3 ./test.py -f 2-query/timezone.py
+python3 ./test.py -f 2-query/timezone.py -R
+python3 ./test.py -f 2-query/To_iso8601.py
+python3 ./test.py -f 2-query/To_iso8601.py -R
+python3 ./test.py -f 2-query/To_unixtimestamp.py
+python3 ./test.py -f 2-query/To_unixtimestamp.py -R
+python3 ./test.py -f 2-query/Today.py
+# python3 ./test.py -f 2-query/Today.py -R
+python3 ./test.py -f 2-query/top.py
+python3 ./test.py -f 2-query/top.py -R
+python3 ./test.py -f 2-query/tsbsQuery.py
+python3 ./test.py -f 2-query/tsbsQuery.py -R
+python3 ./test.py -f 2-query/ttl_comment.py
+python3 ./test.py -f 2-query/ttl_comment.py -R
+python3 ./test.py -f 2-query/twa.py
+python3 ./test.py -f 2-query/twa.py -R
+python3 ./test.py -f 2-query/union.py
+python3 ./test.py -f 2-query/union.py -R
+python3 ./test.py -f 2-query/unique.py
+python3 ./test.py -f 2-query/unique.py -R
+python3 ./test.py -f 2-query/upper.py
+python3 ./test.py -f 2-query/upper.py -R
+python3 ./test.py -f 2-query/varchar.py
+python3 ./test.py -f 2-query/varchar.py -R
+
python3 ./test.py -f 1-insert/update_data.py
python3 ./test.py -f 1-insert/delete_data.py
-python3 ./test.py -f 2-query/varchar.py
-python3 ./test.py -f 2-query/ltrim.py
-python3 ./test.py -f 2-query/rtrim.py
-python3 ./test.py -f 2-query/upper.py
-python3 ./test.py -f 2-query/lower.py
python3 ./test.py -f 2-query/join2.py
-python3 ./test.py -f 2-query/substr.py
-python3 ./test.py -f 2-query/union.py
python3 ./test.py -f 2-query/union1.py
python3 ./test.py -f 2-query/concat2.py
-python3 ./test.py -f 2-query/spread.py
-python3 ./test.py -f 2-query/timezone.py
-python3 ./test.py -f 2-query/Now.py
-python3 ./test.py -f 2-query/Today.py
-python3 ./test.py -f 2-query/max.py
-python3 ./test.py -f 2-query/min.py
-python3 ./test.py -f 2-query/To_iso8601.py
-python3 ./test.py -f 2-query/To_unixtimestamp.py
-python3 ./test.py -f 2-query/timetruncate.py
-python3 ./test.py -f 2-query/Timediff.py
python3 ./test.py -f 2-query/json_tag.py
-python3 ./test.py -f 2-query/top.py
-python3 ./test.py -f 2-query/percentile.py
-python3 ./test.py -f 2-query/round.py
-python3 ./test.py -f 2-query/log.py
-python3 ./test.py -f 2-query/pow.py
-python3 ./test.py -f 2-query/sqrt.py
-python3 ./test.py -f 2-query/sin.py
-python3 ./test.py -f 2-query/tan.py
-python3 ./test.py -f 2-query/query_cols_tags_and_or.py
# python3 ./test.py -f 2-query/nestedQuery.py
# TD-15983 subquery output duplicate name column.
# Please Xiangyang Guo modify the following script
@@ -169,18 +222,8 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py
python3 ./test.py -f 2-query/elapsed.py
python3 ./test.py -f 2-query/csum.py
-python3 ./test.py -f 2-query/mavg.py
-python3 ./test.py -f 2-query/sample.py
python3 ./test.py -f 2-query/function_diff.py
-python3 ./test.py -f 2-query/unique.py
-python3 ./test.py -f 2-query/stateduration.py
-python3 ./test.py -f 2-query/statecount.py
-python3 ./test.py -f 2-query/tail.py
-python3 ./test.py -f 2-query/ttl_comment.py
-python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/queryQnode.py
-python3 ./test.py -f 2-query/max_partition.py
-python3 ./test.py -f 2-query/tsbsQuery.py
python3 ./test.py -f 6-cluster/5dnode1mnode.py
python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3
@@ -271,8 +314,8 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
-#python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
-#python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
+python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
+python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py
python3 ./test.py -f 7-tmq/tmqUpdateWithConsume.py
python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot0.py
@@ -358,7 +401,7 @@ python3 ./test.py -f 2-query/interp.py -Q 2
python3 ./test.py -f 2-query/avg.py -Q 2
# python3 ./test.py -f 2-query/elapsed.py -Q 2
python3 ./test.py -f 2-query/csum.py -Q 2
-python3 ./test.py -f 2-query/mavg.py -Q 2
+#python3 ./test.py -f 2-query/mavg.py -Q 2
python3 ./test.py -f 2-query/sample.py -Q 2
python3 ./test.py -f 2-query/function_diff.py -Q 2
python3 ./test.py -f 2-query/unique.py -Q 2
@@ -445,7 +488,7 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3
# python3 ./test.py -f 2-query/avg.py -Q 3
# python3 ./test.py -f 2-query/elapsed.py -Q 3
python3 ./test.py -f 2-query/csum.py -Q 3
-python3 ./test.py -f 2-query/mavg.py -Q 3
+#python3 ./test.py -f 2-query/mavg.py -Q 3
python3 ./test.py -f 2-query/sample.py -Q 3
python3 ./test.py -f 2-query/function_diff.py -Q 3
python3 ./test.py -f 2-query/unique.py -Q 3
@@ -471,3 +514,4 @@ python3 ./test.py -f 2-query/last_row.py -Q 3
python3 ./test.py -f 2-query/tsbsQuery.py -Q 3
python3 ./test.py -f 2-query/sml.py -Q 3
python3 ./test.py -f 2-query/interp.py -Q 3
+
diff --git a/tests/test/c/sdbDump.c b/tests/test/c/sdbDump.c
index aef5056031..b90b781e44 100644
--- a/tests/test/c/sdbDump.c
+++ b/tests/test/c/sdbDump.c
@@ -20,6 +20,9 @@
#include "tconfig.h"
#include "tjson.h"
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
+
#define TMP_DNODE_DIR TD_TMP_DIR_PATH "dumpsdb"
#define TMP_MNODE_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode"
#define TMP_SDB_DATA_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "data"
@@ -429,6 +432,7 @@ int32_t parseArgs(int32_t argc, char *argv[]) {
char cmd[PATH_MAX * 2] = {0};
snprintf(cmd, sizeof(cmd), "rm -rf %s", TMP_DNODE_DIR);
+
system(cmd);
#ifdef WINDOWS
taosMulMkDir(TMP_SDB_DATA_DIR);
@@ -467,3 +471,5 @@ int32_t main(int32_t argc, char *argv[]) {
return dumpSdb();
}
+
+#pragma GCC diagnostic pop
\ No newline at end of file
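The sdbDump.c change brackets the whole translation unit in a diagnostic push/pop pair so that the unchecked system() call (declared warn_unused_result in glibc) does not trip -Wunused-result, e.g. under -Werror. A minimal standalone sketch of the same pattern, scoped to just the offending call rather than the whole file; the function name and buffer size here are illustrative:

#include <stdio.h>
#include <stdlib.h>

/* Remove a temporary directory via the shell; suppress -Wunused-result
 * only around the one call that triggers it. */
static void removeTmpDir(const char *path) {
  char cmd[1024];
  snprintf(cmd, sizeof(cmd), "rm -rf %s", path);
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-result"
  system(cmd);
#pragma GCC diagnostic pop
}

Scoping the pragmas tightly, as above, keeps the warning live for any other unchecked calls in the file; the patch instead opts for a file-wide push at the top and a pop at the bottom.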
diff --git a/tests/test/c/tmqSim.c b/tests/test/c/tmqSim.c
index d39ade7e91..71b31ba107 100644
--- a/tests/test/c/tmqSim.c
+++ b/tests/test/c/tmqSim.c
@@ -492,7 +492,6 @@ static char* shellFormatTimestamp(char* buf, int64_t val, int32_t precision) {
static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* field, int32_t length,
int32_t precision) {
if (val == NULL) {
- taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR);
return;
}
@@ -540,13 +539,34 @@ static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* f
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_JSON:
- memcpy(buf, val, length);
- buf[length] = 0;
- taosFprintfFile(pFile, "\'%s\'", buf);
+ {
+ char quotationStr[2];
+ int32_t bufIndex = 0;
+ quotationStr[0] = 0;
+ quotationStr[1] = 0;
+ for (int32_t i = 0; i < length; i++) {
+ buf[bufIndex] = val[i];
+ bufIndex++;
+ if (val[i] == '\"') {
+ buf[bufIndex] = val[i];
+ bufIndex++;
+ quotationStr[0] = '\"';
+ }
+ if (val[i] == ',') {
+ quotationStr[0] = '\"';
+ }
+ }
+ buf[bufIndex] = 0;
+ if (length == 0) {
+ quotationStr[0] = '\"';
+ }
+
+ taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr);
+ }
break;
case TSDB_DATA_TYPE_TIMESTAMP:
shellFormatTimestamp(buf, *(int64_t*)val, precision);
- taosFprintfFile(pFile, "'%s'", buf);
+ taosFprintfFile(pFile, "%s", buf);
break;
default:
break;
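The rewritten BINARY/NCHAR/JSON branch above replaces blanket single-quoting with minimal CSV-style quoting: an embedded double quote is doubled, and the whole field is wrapped in double quotes only when it contains a quote or a comma, or is empty. A self-contained sketch of the same rule (helper name, FILE-based output, and buffer size are illustrative; the real code writes through taosFprintfFile):

#include <stdbool.h>
#include <stdio.h>

/* One field, quoted per the rule in shellDumpFieldToFile: double any
 * embedded '"', and quote the field only when it contains '"' or ','
 * or is empty. */
void csvWriteField(FILE *fp, const char *val, int len) {
  char buf[4096];             /* illustrative; worst case is 2*len + 1 bytes */
  int  out = 0;
  bool quote = (len == 0);    /* empty fields are emitted as "" */
  for (int i = 0; i < len; i++) {
    buf[out++] = val[i];
    if (val[i] == '"') {      /* RFC 4180 style: escape a quote by doubling it */
      buf[out++] = '"';
      quote = true;
    }
    if (val[i] == ',') {
      quote = true;
    }
  }
  buf[out] = 0;
  fprintf(fp, quote ? "\"%s\"" : "%s", buf);
}

Note the doubling: a value made entirely of quote characters grows to twice its length, so the destination buffer has to allow for 2*len + 1 bytes, which the loop's two writes per character imply.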
diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c
index b993a8dbf1..16732ff9a1 100644
--- a/tests/tsim/src/simExe.c
+++ b/tests/tsim/src/simExe.c
@@ -464,7 +464,10 @@ void simStoreSystemContentResult(SScript *script, char *filename) {
taosCloseFile(&pFile);
char rmCmd[MAX_FILE_NAME_LEN] = {0};
sprintf(rmCmd, "rm -f %s", filename);
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
system(rmCmd);
+#pragma GCC diagnostic pop
}
}
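simExe.c applies the same suppression, this time scoped tightly around the one system() call that removes a stale result file. An alternative that avoids pragmas altogether, shown here only as a sketch and not what the patch does, is to consume the return value and log failures:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical checked wrapper: inspecting the wait status satisfies
 * -Wunused-result without any diagnostic pragmas. */
static void runShellCmd(const char *cmd) {
  int rc = system(cmd);       /* raw wait status, not the exit code itself */
  if (rc != 0) {
    fprintf(stderr, "command '%s' failed with status %d\n", cmd, rc);
  }
}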
diff --git a/tools/shell/inc/shellInt.h b/tools/shell/inc/shellInt.h
index 26ca6895ac..15f6f6dc6a 100644
--- a/tools/shell/inc/shellInt.h
+++ b/tools/shell/inc/shellInt.h
@@ -113,7 +113,7 @@ int32_t shellExecute();
int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision);
void shellPrintHeader(TAOS_FIELD *fields, int32_t *width, int32_t num_fields);
void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t length, int32_t precision);
-void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision);
+void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision);
// shellUtil.c
int32_t shellCheckIntSize();
void shellPrintVersion();
diff --git a/tools/shell/src/shellCommand.c b/tools/shell/src/shellCommand.c
index d87e10fd08..b73317e991 100644
--- a/tools/shell/src/shellCommand.c
+++ b/tools/shell/src/shellCommand.c
@@ -510,7 +510,10 @@ int32_t shellReadCommand(char *command) {
shellClearLineAfter(&cmd);
break;
case 12: // Ctrl + L;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
system("clear");
+#pragma GCC diagnostic pop
shellShowOnScreen(&cmd);
break;
case 21: // Ctrl + U;
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index 68e3a272c3..45d5489803 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -62,7 +62,10 @@ int32_t shellRunSingleCommand(char *command) {
}
if (shellRegexMatch(command, "^[\t ]*clear[ \t;]*$", REG_EXTENDED | REG_ICASE)) {
- system("clear");
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
+ system("clear");
+#pragma GCC diagnostic pop
return 0;
}
@@ -266,7 +269,6 @@ char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) {
void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision) {
if (val == NULL) {
- taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR);
return;
}
@@ -314,13 +316,34 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_JSON:
- memcpy(buf, val, length);
- buf[length] = 0;
- taosFprintfFile(pFile, "\'%s\'", buf);
+ {
+ char quotationStr[2];
+ int32_t bufIndex = 0;
+ quotationStr[0] = 0;
+ quotationStr[1] = 0;
+ for (int32_t i = 0; i < length; i++) {
+ buf[bufIndex] = val[i];
+ bufIndex++;
+ if (val[i] == '\"') {
+ buf[bufIndex] = val[i];
+ bufIndex++;
+ quotationStr[0] = '\"';
+ }
+ if (val[i] == ',') {
+ quotationStr[0] = '\"';
+ }
+ }
+ buf[bufIndex] = 0;
+ if (length == 0) {
+ quotationStr[0] = '\"';
+ }
+
+ taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr);
+ }
break;
case TSDB_DATA_TYPE_TIMESTAMP:
shellFormatTimestamp(buf, *(int64_t *)val, precision);
- taosFprintfFile(pFile, "'%s'", buf);
+ taosFprintfFile(pFile, "%s", buf);
break;
default:
break;
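shellEngine.c picks up the same dump-to-file changes as tmqSim.c: a NULL value now produces an empty field instead of the literal NULL string, string-like types go through the quoting loop, and timestamps are written bare rather than single-quoted. A sketch of the resulting row shape, assuming the csvWriteField helper from the earlier sketch (column values are illustrative):

#include <stdio.h>

void csvWriteField(FILE *fp, const char *val, int len); /* earlier sketch */

static void writeRowSketch(FILE *fp) {
  csvWriteField(fp, "plain", 5);            /* -> plain   (no quoting needed) */
  fputc(',', fp);
  csvWriteField(fp, "a,b", 3);              /* -> "a,b"   (comma forces quotes) */
  fputc(',', fp);
  /* NULL column: the patched code returns early and emits nothing,
   * leaving an empty field between the separators. */
  fputc(',', fp);
  fprintf(fp, "2022-08-01 12:00:00.000");   /* timestamp, now unquoted */
  fputc('\n', fp);
}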
diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c
index 2dcab04b3f..b8b8392b96 100644
--- a/tools/shell/src/shellWebsocket.c
+++ b/tools/shell/src/shellWebsocket.c
@@ -18,19 +18,19 @@
#include "shellInt.h"
int shell_conn_ws_server(bool first) {
- shell.ws_conn = ws_connect_with_dsn(shell.args.dsn);
- if (!shell.ws_conn) {
- fprintf(stderr, "failed to connect %s, reason: %s\n",
- shell.args.dsn, ws_errstr(NULL));
- return -1;
- }
- if (first && shell.args.restful) {
- fprintf(stdout, "successfully connect to %s\n\n",
- shell.args.dsn);
- } else if (first && shell.args.cloud) {
- fprintf(stdout, "successfully connect to cloud service\n");
- }
- return 0;
+ shell.ws_conn = ws_connect_with_dsn(shell.args.dsn);
+ if (!shell.ws_conn) {
+ fprintf(stderr, "failed to connect %s, reason: %s\n",
+ shell.args.dsn, ws_errstr(NULL));
+ return -1;
+ }
+ if (first && shell.args.restful) {
+ fprintf(stdout, "successfully connect to %s\n\n",
+ shell.args.dsn);
+ } else if (first && shell.args.cloud) {
+ fprintf(stdout, "successfully connect to cloud service\n");
+ }
+ return 0;
}
static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) {
@@ -39,7 +39,7 @@ static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) {
ws_fetch_block(wres, &data, &rows);
*execute_time += (double)(ws_take_timing(wres)/1E6);
if (!rows) {
- return 0;
+ return 0;
}
int num_fields = ws_field_count(wres);
TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres);
@@ -64,7 +64,7 @@ static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) {
putchar(' ');
putchar('|');
}
- putchar('\r');
+ putchar('\r');
putchar('\n');
}
numOfRows += rows;
@@ -79,7 +79,7 @@ static int verticalPrintWebsocket(WS_RES* wres, double* pexecute_time) {
ws_fetch_block(wres, &data, &rows);
*pexecute_time += (double)(ws_take_timing(wres)/1E6);
if (!rows) {
- return 0;
+ return 0;
}
int num_fields = ws_field_count(wres);
TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres);
@@ -98,7 +98,7 @@ static int verticalPrintWebsocket(WS_RES* wres, double* pexecute_time) {
uint32_t len;
for (int i = 0; i < rows; i++) {
printf("*************************** %d.row ***************************\n",
- numOfRows + 1);
+ numOfRows + 1);
for (int j = 0; j < num_fields; j++) {
TAOS_FIELD* field = fields + j;
int padding = (int)(maxColNameLen - strlen(field->name));
@@ -121,7 +121,7 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute
}
TdFilePtr pFile = taosOpenFile(fullname,
- TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM);
+ TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM);
if (pFile == NULL) {
fprintf(stderr, "failed to open file: %s\r\n", fullname);
return -1;
@@ -132,7 +132,7 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute
*pexecute_time += (double)(ws_take_timing(wres)/1E6);
if (!rows) {
taosCloseFile(&pFile);
- return 0;
+ return 0;
}
int numOfRows = 0;
TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres);
@@ -207,7 +207,7 @@ void shellRunSingleCommandWebsocketImp(char *command) {
}
if (!shell.ws_conn && shell_conn_ws_server(0)) {
- return;
+ return;
}
shell.stop_query = false;
@@ -216,16 +216,16 @@ void shellRunSingleCommandWebsocketImp(char *command) {
WS_RES* res = ws_query_timeout(shell.ws_conn, command, shell.args.timeout);
int code = ws_errno(res);
if (code != 0) {
- et = taosGetTimestampUs();
- fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6);
- if (code == TSDB_CODE_WS_SEND_TIMEOUT || code == TSDB_CODE_WS_RECV_TIMEOUT) {
- fprintf(stderr, "Hint: use -t to increase the timeout in seconds\n");
- } else if (code == TSDB_CODE_WS_INTERNAL_ERRO || code == TSDB_CODE_WS_CLOSED) {
- fprintf(stderr, "TDengine server is down, will try to reconnect\n");
- shell.ws_conn = NULL;
- }
- ws_free_result(res);
- return;
+ et = taosGetTimestampUs();
+ fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6);
+ if (code == TSDB_CODE_WS_SEND_TIMEOUT || code == TSDB_CODE_WS_RECV_TIMEOUT) {
+ fprintf(stderr, "Hint: use -t to increase the timeout in seconds\n");
+ } else if (code == TSDB_CODE_WS_INTERNAL_ERRO || code == TSDB_CODE_WS_CLOSED) {
+ fprintf(stderr, "TDengine server is down, will try to reconnect\n");
+ shell.ws_conn = NULL;
+ }
+ ws_free_result(res);
+ return;
}
double execute_time = ws_take_timing(res)/1E6;
@@ -233,36 +233,36 @@ void shellRunSingleCommandWebsocketImp(char *command) {
if (shellRegexMatch(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) {
fprintf(stdout, "Database changed.\r\n\r\n");
fflush(stdout);
- ws_free_result(res);
+ ws_free_result(res);
return;
}
int numOfRows = 0;
if (ws_is_update_query(res)) {
- numOfRows = ws_affected_rows(res);
- et = taosGetTimestampUs();
+ numOfRows = ws_affected_rows(res);
+ et = taosGetTimestampUs();
double total_time = (et - st)/1E3;
double net_time = total_time - (double)execute_time;
- printf("Query Ok, %d of %d row(s) in database\n", numOfRows, numOfRows);
+ printf("Query Ok, %d of %d row(s) in database\n", numOfRows, numOfRows);
printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
} else {
- int error_no = 0;
- numOfRows = shellDumpWebsocket(res, fname, &error_no, printMode, &execute_time);
- if (numOfRows < 0) {
- ws_free_result(res);
- return;
- }
- et = taosGetTimestampUs();
+ int error_no = 0;
+ numOfRows = shellDumpWebsocket(res, fname, &error_no, printMode, &execute_time);
+ if (numOfRows < 0) {
+ ws_free_result(res);
+ return;
+ }
+ et = taosGetTimestampUs();
double total_time = (et - st) / 1E3;
double net_time = total_time - execute_time;
- if (error_no == 0 && !shell.stop_query) {
- printf("Query OK, %d row(s) in set\n", numOfRows);
- printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
- } else {
- printf("Query interrupted, %d row(s) in set (%.6fs)\n", numOfRows,
- (et - st)/1E6);
- printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
- }
+ if (error_no == 0 && !shell.stop_query) {
+ printf("Query OK, %d row(s) in set\n", numOfRows);
+ printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
+ } else {
+ printf("Query interrupted, %d row(s) in set (%.6fs)\n", numOfRows,
+ (et - st)/1E6);
+ printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
+ }
}
printf("\n");
ws_free_result(res);
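Beyond the re-indentation, the shellWebsocket.c hunks preserve two behaviors worth noting: on an internal error or closed connection the shell nulls out shell.ws_conn so the next command reconnects lazily, and every query path decomposes wall-clock time into server execute time plus network time. A condensed sketch of that decomposition, reusing identifiers from the diff; the nanosecond unit for ws_take_timing is inferred from the /1E6 conversion to milliseconds, not confirmed:

/* taosGetTimestampUs() returns microseconds; ws_take_timing() appears
 * to return nanoseconds, given that /1E6 yields milliseconds here. */
int64_t st = taosGetTimestampUs();
WS_RES *res = ws_query_timeout(shell.ws_conn, command, shell.args.timeout);
double execute_time = ws_take_timing(res) / 1E6;   /* server side, ms */
int64_t et = taosGetTimestampUs();
double total_time = (et - st) / 1E3;               /* wall clock, ms  */
double net_time   = total_time - execute_time;     /* attributed to the network */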