diff --git a/cmake/cmake.platform b/cmake/cmake.platform
index 887fbd86d5..3aa1ffc07e 100644
--- a/cmake/cmake.platform
+++ b/cmake/cmake.platform
@@ -46,7 +46,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
MESSAGE("Current system processor is ${CMAKE_SYSTEM_PROCESSOR}.")
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64")
- MESSAGE("Current system arch is arm64")
+ MESSAGE("Current system arch is 64")
SET(TD_DARWIN_64 TRUE)
ADD_DEFINITIONS("-D_TD_DARWIN_64")
ENDIF ()
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index 9547323acf..68caf9a9ac 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG e8bfca6
+ GIT_TAG 9cb965f
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/docs/en/01-index.md b/docs/en/01-index.md
index 22e62bc5e0..5265be42f8 100644
--- a/docs/en/01-index.md
+++ b/docs/en/01-index.md
@@ -4,25 +4,24 @@ sidebar_label: Documentation Home
slug: /
---
-
-TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.
+TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.
To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
-TDengine greatly improves the efficiency of data ingestion, querying and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [“Concepts”](./concept) thoroughly.
+TDengine greatly improves the efficiency of data ingestion, querying, and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [Concepts](./concept) thoroughly.
-If you are a developer, please read the [“Developer Guide”](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work.
+If you are a developer, please read the [Developer Guide](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work.
-We live in the era of big data, and scale-up is unable to meet the growing needs of business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to ["cluster deployment"](../deployment).
+We live in the era of big data, and scale-up is unable to meet the growing needs of the business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage, and maintain a TDengine cluster, please refer to [Cluster Deployment](../deployment).
-TDengine uses ubiquitious SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll up, interpolation and time weighted average, among many others. The ["SQL Reference"](./taos-sql) chapter describes the SQL syntax in detail, and lists the various supported commands and functions.
+TDengine uses ubiquitous SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll-up, interpolation, and time-weighted average, among many others. The [SQL Reference](./taos-sql) chapter describes the SQL syntax in detail and lists the various supported commands and functions.
-If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the ["Administration"](./operation) section.
+If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the [Administration](./operation) section.
-If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the ["Reference"](./reference) chapter.
+If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the [Reference](./reference) chapter.
-If you are very interested in the internal design of TDengine, please read the chapter ["Inside TDengine”](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
+If you are very interested in the internal design of TDengine, please read the chapter [Inside TDengine](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
-TDengine is an open source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation, or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
+TDengine is an open-source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
-Together, we make a difference.
+Together, we make a difference!
diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md
index 5d21fbaf90..d385845d7c 100644
--- a/docs/en/02-intro/index.md
+++ b/docs/en/02-intro/index.md
@@ -3,7 +3,7 @@ title: Introduction
toc_max_heading_level: 2
---
-TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](/develop/cache), [stream processing](/develop/stream), [data subscription](/develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
+TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.
@@ -12,34 +12,34 @@ This section introduces the major features, competitive advantages, typical use-
The major features are listed below:
1. Insert data
- * supports [using SQL to insert](/develop/insert-data/sql-writing).
- * supports [schemaless writing](/reference/schemaless/) just like NoSQL databases. It also supports standard protocols like [InfluxDB LINE](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json) among others.
- * supports seamless integration with third-party tools like [Telegraf](/third-party/telegraf/), [Prometheus](/third-party/prometheus/), [collectd](/third-party/collectd/), [StatsD](/third-party/statsd/), [TCollector](/third-party/tcollector/) and [icinga2/](/third-party/icinga2/), they can write data into TDengine with simple configuration and without a single line of code.
+ - Supports [using SQL to insert](../develop/insert-data/sql-writing).
+ - Supports [schemaless writing](../reference/schemaless/) just like NoSQL databases. It also supports standard protocols like [InfluxDB Line](../develop/insert-data/influxdb-line), [OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](../develop/insert-data/opentsdb-json) among others.
+    - Supports seamless integration with third-party tools like [Telegraf](../third-party/telegraf/), [Prometheus](../third-party/prometheus/), [collectd](../third-party/collectd/), [StatsD](../third-party/statsd/), [TCollector](../third-party/tcollector/), [EMQX](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker), and [Icinga2](../third-party/icinga2/). They can write data into TDengine with simple configuration and without a single line of code.
2. Query data
- * supports standard [SQL](/taos-sql/), including nested query.
- * supports [time series specific functions](/taos-sql/function/#time-series-extensions) and [time series specific queries](/taos-sql/distinguished), like downsampling, interpolation, cumulated sum, time weighted average, state window, session window and many others.
- * supports [user defined functions](/taos-sql/udf).
-3. [Caching](/develop/cache/): TDengine always saves the last data point in cache, so Redis is not needed for time-series data processing.
-4. [Stream Processing](/develop/stream/): not only is the continuous query is supported, but TDengine also supports even driven stream processing, so Flink or spark is not needed for time-series daata processing.
-5. [Data Dubscription](/develop/tmq/): application can subscribe a table or a set of tables. API is the same as Kafka, but you can specify filter conditions.
+ - Supports standard [SQL](../taos-sql/), including nested query.
+ - Supports [time series specific functions](../taos-sql/function/#time-series-extensions) and [time series specific queries](../taos-sql/distinguished), like downsampling, interpolation, cumulated sum, time weighted average, state window, session window and many others.
+ - Supports [User Defined Functions (UDF)](../taos-sql/udf).
+3. [Caching](../develop/cache/): TDengine always saves the last data point in cache, so Redis is not needed for time-series data processing.
+4. [Stream Processing](../develop/stream/): Not only is continuous query supported, but TDengine also supports event-driven stream processing, so Flink or Spark is not needed for time-series data processing.
+5. [Data Subscription](../develop/tmq/): Applications can subscribe to a table or a set of tables. The API is the same as Kafka's, but you can specify filter conditions.
6. Visualization
- * supports seamless integration with [Grafana](/third-party/grafana/) for visualization.
- * supports seamless integration with Google Data Studio.
+ - Supports seamless integration with [Grafana](../third-party/grafana/) for visualization.
+ - Supports seamless integration with Google Data Studio.
7. Cluster
- * supports [cluster](/deployment/) with the capability of increasing processing power by adding more nodes.
- * supports [deployment on Kubernetes](/deployment/k8s/)
- * supports high availability via data replication.
+ - Supports [cluster](../deployment/) with the capability of increasing processing power by adding more nodes.
+ - Supports [deployment on Kubernetes](../deployment/k8s/).
+ - Supports high availability via data replication.
8. Administration
- * provides [monitoring](/operation/monitor) on running instances of TDengine.
- * provides many ways to [import](/operation/import) and [export](/operation/export) data.
+ - Provides [monitoring](../operation/monitor) on running instances of TDengine.
+ - Provides many ways to [import](../operation/import) and [export](../operation/export) data.
9. Tools
- * provides an interactive [command-line interface](/reference/taos-shell) for management, maintenance and ad-hoc queries.
- * provides a tool [taosBenchmark](/reference/taosbenchmark/) for testing the performance of TDengine.
+ - Provides an interactive [Command-line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries.
+ - Provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine.
10. Programming
- * provides [connectors](/reference/connector/) for [C/C++](/reference/connector/cpp), [Java](/reference/connector/java), [Python](/reference/connector/python), [Go](/reference/connector/go), [Rust](/reference/connector/rust), [Node.js](/reference/connector/node) and other programming languages.
- * provides a [REST API](/reference/rest-api/).
+ - Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
+ - Provides a [REST API](../reference/rest-api/).
-For more details on features, please read through the entire documentation.
+For more details on features, please read through the entire documentation.
## Competitive Advantages
@@ -49,23 +49,31 @@ By making full use of [characteristics of time series data](https://tdengine.com
- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
-- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
+- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment and full observability, TDengine is a cloud native Time-series Database and can be deployed on public, private or hybrid clouds.
- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to[
-](https://tdengine.com/tdengine/easy-time-series-data-platform/) deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
+ ](https://tdengine.com/tdengine/easy-time-series-data-platform/) deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
-- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
+- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
-With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly;2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly;3: With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
+With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
+
+1. With its superior performance, the computing and storage resources are reduced significantly.
+2. With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly.
+3. With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
## Technical Ecosystem
+
This is how TDengine would be situated, in a typical time-series data processing platform:
+
+

-Figure 1. TDengine Technical Ecosystem
+Figure 1. TDengine Technical Ecosystem
+
On the left-hand side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right-hand side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides an interactive command-line interface and a web interface for management and maintenance.
@@ -75,42 +83,42 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
### Characteristics and Requirements of Data Sources
-| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| -------------------------------------------------------- | ------------------ | ----------------------- | ------------------- | :----------------------------------------------------------- |
-| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry.|
-| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
-| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |
+| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------ | ------------------ | ----------------------- | ------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry. |
+| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
+| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |
### System Architecture Requirements
-| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
+| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ----------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| A simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions. There is no need to integrate any additional third-party products. |
-| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
-| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. |
+| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
+| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. |
### System Function Requirements
-| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level.|
-| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. |
+| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| -------------------------------------------- | ------------------ | ----------------------- | ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level. |
+| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. |
### System Performance Requirements
-| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
-| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products.|
-| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
+| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------- |
+| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
+| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
+| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
### System Maintenance Requirements
-| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
-| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs.|
-| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine.|
+| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| --------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
+| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs. |
+| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine. |
## Comparison with other databases
diff --git a/docs/en/04-concept/index.md b/docs/en/04-concept/index.md
index 5a9c55fdd6..b0a0c25d85 100644
--- a/docs/en/04-concept/index.md
+++ b/docs/en/04-concept/index.md
@@ -162,7 +162,7 @@ To better understand the data model using metri, tags, super table and subtable,
## Database
-A database is a collection of tables. TDengine allows a running instance to have multiple databases, and each database can be configured with different storage policies. The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. In order for TDengine to work with maximum efficiency in various scenarios, TDengine recommends that STables with different data characteristics be created in different databases.
+A database is a collection of tables. TDengine allows a running instance to have multiple databases, and each database can be configured with different storage policies. The [characteristics of time-series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For example, days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. In order for TDengine to work with maximum efficiency in various scenarios, TDengine recommends that STables with different data characteristics be created in different databases.
In a database, there can be one or more STables, but a STable belongs to only one database. All tables owned by a STable are stored in only one database.
diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md
index 2053706421..901fe69d24 100644
--- a/docs/en/07-develop/01-connect/index.md
+++ b/docs/en/07-develop/01-connect/index.md
@@ -279,6 +279,6 @@ Prior to establishing connection, please make sure TDengine is already running a
:::tip
-If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.taosdata.com/train-faq/faq).
+If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq).
:::
diff --git a/docs/en/07-develop/03-insert-data/05-high-volume.md b/docs/en/07-develop/03-insert-data/05-high-volume.md
index 8163ae03b2..9ea0c88447 100644
--- a/docs/en/07-develop/03-insert-data/05-high-volume.md
+++ b/docs/en/07-develop/03-insert-data/05-high-volume.md
@@ -16,7 +16,7 @@ To achieve high performance writing, there are a few aspects to consider. In the
From the perspective of application program, you need to consider:
-1. The data size of each single write, also known as batch size. Generally speaking, higher batch size generates better writing performance. However, once the batch size is over a specific value, you will not get any additional benefit anymore. When using SQL to write into TDengine, it's better to put as much as possible data in single SQL. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB. It can be configured by parameter `maxSQLLength` on client side, and the default value is 65,480.
+1. The data size of each single write, also known as batch size. Generally speaking, higher batch size generates better writing performance. However, once the batch size is over a specific value, you will not get any additional benefit anymore. When using SQL to write into TDengine, it's better to put as much as possible data in single SQL. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB.
2. The number of concurrent connections. Normally more connections can get better result. However, once the number of connections exceeds the processing ability of the server side, the performance may downgrade.
@@ -46,12 +46,9 @@ If the data source is Kafka, then the appication program is a consumer of Kafka,
### Tune TDengine
-TDengine is a distributed and high performance time series database, there are also some ways to tune TDengine to get better writing performance.
+On the server side, database configuration parameter `vgroups` needs to be set carefully to maximize the system performance. If it's set too low, the system capability can't be utilized fully; if it's set too big, unnecessary resource competition may be produced. A normal recommendation for the `vgroups` parameter is 2 times the number of CPU cores. However, depending on the actual system resources, it may still need to be tuned.
-1. Set proper number of `vgroups` according to available CPU cores. Normally, we recommend 2 \* number_of_cores as a starting point. If the verification result shows this is not enough to utilize CPU resources, you can use a higher value.
-2. Set proper `minTablesPerVnode`, `tableIncStepPerVnode`, and `maxVgroupsPerDb` according to the number of tables so that tables are distributed even across vgroups. The purpose is to balance the workload among all vnodes so that system resources can be utilized better to get higher performance.
-
-For more performance tuning parameters, please refer to [Configuration Parameters](../../../reference/config).
+For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config).
## Sample Programs
@@ -359,7 +356,7 @@ Writing process tries to read as much as possible data from message queue and wr
-SQLWriter class encapsulates the logic of composing SQL and writing data. Please be noted that the tables have not been created before writing, but are created automatically when catching the exception of table doesn't exist. For other exceptions caught, the SQL which caused the exception are logged for you to debug. This class also checks the SQL length, if the SQL length is closed to `maxSQLLength` the SQL will be executed immediately. To improve writing efficiency, it's better to increase `maxSQLLength` properly.
+SQLWriter class encapsulates the logic of composing SQL and writing data. Please note that the tables have not been created before writing, but are created automatically when catching the exception of table doesn't exist. For other exceptions caught, the SQL which caused the exception is logged for you to debug. This class also checks the SQL length, and passes the maximum SQL length by parameter maxSQLLength according to actual TDengine limit.
SQLWriter
diff --git a/docs/en/10-deployment/01-deploy.md b/docs/en/10-deployment/01-deploy.md
index a445b684dc..5dfcd3108d 100644
--- a/docs/en/10-deployment/01-deploy.md
+++ b/docs/en/10-deployment/01-deploy.md
@@ -39,18 +39,18 @@ To get the hostname on any host, the command `hostname -f` can be executed.
On the physical machine running the application, ping the dnode that is running taosd. If the dnode is not accessible, the application cannot connect to taosd. In this case, verify the DNS and hosts settings on the physical node running the application.
-The end point of each dnode is the output hostname and port, such as h1.taosdata.com:6030.
+The end point of each dnode is the output hostname and port, such as h1.tdengine.com:6030.
### Step 5
-Modify the TDengine configuration file `/etc/taos/taos.cfg` on each node. Assuming the first dnode of TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as following.
+Modify the TDengine configuration file `/etc/taos/taos.cfg` on each node. Assuming the first dnode of TDengine cluster is "h1.tdengine.com:6030", its `taos.cfg` is configured as following.
```c
// firstEp is the end point to connect to when any dnode starts
-firstEp h1.taosdata.com:6030
+firstEp h1.tdengine.com:6030
// must be configured to the FQDN of the host where the dnode is launched
-fqdn h1.taosdata.com
+fqdn h1.tdengine.com
// the port used by the dnode, default is 6030
serverPort 6030
@@ -76,13 +76,13 @@ The first dnode can be started following the instructions in [Get Started](/get-
taos> show dnodes;
id | endpoint | vnodes | support_vnodes | status | create_time | note |
============================================================================================================================================
-1 | h1.taosdata.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
+1 | h1.tdengine.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
Query OK, 1 rows affected (0.007984s)
```
-From the above output, it is shown that the end point of the started dnode is "h1.taosdata.com:6030", which is the `firstEp` of the cluster.
+From the above output, it is shown that the end point of the started dnode is "h1.tdengine.com:6030", which is the `firstEp` of the cluster.
## Add DNODE
@@ -90,7 +90,7 @@ There are a few steps necessary to add other dnodes in the cluster.
Second, we can start `taosd` as instructed in [Get Started](/get-started/).
-Then, on the first dnode i.e. h1.taosdata.com in our example, use TDengine CLI `taos` to execute the following command:
+Then, on the first dnode i.e. h1.tdengine.com in our example, use TDengine CLI `taos` to execute the following command:
```sql
CREATE DNODE "h2.taos.com:6030";
@@ -98,7 +98,7 @@ CREATE DNODE "h2.taos.com:6030";
This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In the command "fqdn:port" should be quoted using double quotes. Change `"h2.taos.com:6030"` to the end point of your new dnode.
-Then on the first dnode h1.taosdata.com, execute `show dnodes` in `taos`
+Then on the first dnode h1.tdengine.com, execute `show dnodes` in `taos`
```sql
SHOW DNODES;
diff --git a/docs/en/10-deployment/05-helm.md b/docs/en/10-deployment/05-helm.md
index 302730f1b5..a4fa681000 100644
--- a/docs/en/10-deployment/05-helm.md
+++ b/docs/en/10-deployment/05-helm.md
@@ -152,7 +152,7 @@ clusterDomainSuffix: ""
# converting an upper-snake-cased variable like `TAOS_DEBUG_FLAG`,
# to a camelCase taos config variable `debugFlag`.
#
-# See the variable list at https://www.taosdata.com/cn/documentation/administrator .
+# See the [Configuration Variables](../../reference/config)
#
# Note:
# 1. firstEp/secondEp: should not be setted here, it's auto generated at scale-up.
diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md
index 37438ee780..b9a3fa2321 100644
--- a/docs/en/12-taos-sql/25-grant.md
+++ b/docs/en/12-taos-sql/25-grant.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Permissions Management
-title: Permissions Management
+sidebar_label: Access Control
+title: User and Access Control
+description: Manage user and user's permission
---
This document describes how to manage permissions in TDengine.
diff --git a/docs/en/12-taos-sql/index.md b/docs/en/12-taos-sql/index.md
index e243cd2318..a5ffc9dc8d 100644
--- a/docs/en/12-taos-sql/index.md
+++ b/docs/en/12-taos-sql/index.md
@@ -1,6 +1,6 @@
---
title: TDengine SQL
-description: "The syntax supported by TDengine SQL "
+description: 'The syntax supported by TDengine SQL '
---
This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. TDengine SQL has been enhanced in version 3.0, and the query engine has been rearchitected. For information about how TDengine SQL has changed, see [Changes in TDengine 3.0](../taos-sql/changes).
@@ -15,7 +15,7 @@ Syntax Specifications used in this chapter:
- | means one of a few options, excluding | itself.
- … means the item prior to it can be repeated multiple times.
-To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
+To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
```
taos> DESCRIBE meters;
diff --git a/docs/en/13-operation/01-pkg-install.md b/docs/en/13-operation/01-pkg-install.md
index 53da672daa..d7713b943f 100644
--- a/docs/en/13-operation/01-pkg-install.md
+++ b/docs/en/13-operation/01-pkg-install.md
@@ -1,12 +1,12 @@
---
-title: Install & Uninstall
+title: Install and Uninstall
description: Install, Uninstall, Start, Stop and Upgrade
---
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-TDengine community version provides deb and rpm packages for users to choose from, based on their system environment. The deb package supports Debian, Ubuntu and derivative systems. The rpm package supports CentOS, RHEL, SUSE and derivative systems. Furthermore, a tar.gz package is provided for TDengine Enterprise customers.
+This document gives more information about installing, uninstalling, and upgrading TDengine.
## Install
@@ -56,7 +56,7 @@ Removing taostools (2.1.2) ...
Deb package of TDengine can be uninstalled as below:
-```bash
+```
$ sudo dpkg -r tdengine
(Reading database ... 137504 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
@@ -110,109 +110,57 @@ Start to uninstall taos tools ...
taos tools is uninstalled successfully!
```
+
+
+Run C:\TDengine\unins000.exe to uninstall TDengine on a Windows system.
-:::note
+:::info
-- We strongly recommend not to use multiple kinds of installation packages on a single host TDengine.
-- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
+- We strongly recommend not to use multiple kinds of installation packages on a single host TDengine. The packages may affect each other and cause errors.
-```bash
- $ sudo rm -f /var/lib/dpkg/info/tdengine*
-```
+- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information.
-- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
+ ```
+ $ sudo rm -f /var/lib/dpkg/info/tdengine*
+ ```
-```bash
- $ sudo rpm -e --noscripts tdengine
-```
+You can then reinstall if needed.
+
+- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information.
+
+ ```
+ $ sudo rpm -e --noscripts tdengine
+ ```
+
+You can then reinstall if needed.
:::
-## Installation Directory
-
-TDengine is installed at /usr/local/taos if successful.
-
-```bash
-$ cd /usr/local/taos
-$ ll
-$ ll
-total 28
-drwxr-xr-x 7 root root 4096 Feb 22 09:34 ./
-drwxr-xr-x 12 root root 4096 Feb 22 09:34 ../
-drwxr-xr-x 2 root root 4096 Feb 22 09:34 bin/
-drwxr-xr-x 2 root root 4096 Feb 22 09:34 cfg/
-lrwxrwxrwx 1 root root 13 Feb 22 09:34 data -> /var/lib/taos/
-drwxr-xr-x 2 root root 4096 Feb 22 09:34 driver/
-drwxr-xr-x 10 root root 4096 Feb 22 09:34 examples/
-drwxr-xr-x 2 root root 4096 Feb 22 09:34 include/
-lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
-```
-
-During the installation process:
-
-- Configuration directory, data directory, and log directory are created automatically if they don't exist
-- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg
-- The default data directory is /var/lib/taos, which is a soft link to /usr/local/taos/data
-- The default log directory is /var/log/taos, which is a soft link to /usr/local/taos/log
-- The executables at /usr/local/taos/bin are linked to /usr/bin
-- The DLL files at /usr/local/taos/driver are linked to /usr/lib
-- The header files at /usr/local/taos/include are linked to /usr/include
-
-:::note
+Uninstalling and Modifying Files
- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data.
+
- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.
-## Start and Stop
-
-Linux system services `systemd`, `systemctl` or `service` are used to start, stop and restart TDengine. The server process of TDengine is `taosd`, which is started automatically after the Linux system is started. System operators can use `systemd`, `systemctl` or `service` to start, stop or restart TDengine server.
-
-For example, if using `systemctl` , the commands to start, stop, restart and check TDengine server are below:
-
-- Start server:`systemctl start taosd`
-
-- Stop server:`systemctl stop taosd`
-
-- Restart server:`systemctl restart taosd`
-
-- Check server status:`systemctl status taosd`
-
-Another component named as `taosAdapter` is to provide HTTP service for TDengine, it should be started and stopped using `systemctl`.
-
-If the server process is OK, the output of `systemctl status` is like below:
-
-```
-Active: active (running)
-```
-
-Otherwise, the output is as below:
-
-```
-Active: inactive (dead)
-```
## Upgrade
-
There are two aspects in upgrade operation: upgrade installation package and upgrade a running server.
To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version.
Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
-
- Stop inserting data
- Make sure all data is persisted to disk
-- Make some simple queries (Such as total rows in stables, tables and so on. Note down the values. Follow best practices and relevant SOPs.)
- Stop the cluster of TDengine
- Uninstall old version and install new version
- Start the cluster of TDengine
-- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
+- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
- Run some simple data insertion statements to make sure the cluster works well
- Restore business services
:::warning
-
TDengine doesn't guarantee any lower version is compatible with the data generated by a higher version, so it's never recommended to downgrade the version.
:::
diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
index 74ba78b7fc..ce28ee87d9 100644
--- a/docs/en/14-reference/02-rest-api/02-rest-api.mdx
+++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
@@ -18,12 +18,12 @@ If the TDengine server is already installed, it can be verified as follows:
The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment.
-The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
+The following example lists all databases on the host h1.tdengine.com. To use it in your environment, replace `h1.tdengine.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
```bash
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
-d "select name, ntables, status from information_schema.ins_databases;" \
- h1.taosdata.com:6041/rest/sql
+ h1.tdengine.com:6041/rest/sql
```
The following return value results indicate that the verification passed.
diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx
index 0f977393f1..129d90ea85 100644
--- a/docs/en/14-reference/03-connector/04-java.mdx
+++ b/docs/en/14-reference/03-connector/04-java.mdx
@@ -133,8 +133,6 @@ The configuration parameters in the URL are as follows:
- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is true. Enabling batch pulling and obtaining a batch of data can improve query performance when the query data volume is large.
- batchErrorIgnore:true: When executing statement executeBatch, if there is a SQL execution failure in the middle, the following SQL will continue to be executed. false: No more statements after the failed SQL are executed. The default value is: false.
-For more information about JDBC native connections, see [Video Tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).
-
**Connect using the TDengine client-driven configuration file **
When you use a JDBC native connection to connect to a TDengine cluster, you can use the TDengine client driver configuration file to specify parameters such as `firstEp` and `secondEp` of the cluster in the configuration file as below:
diff --git a/docs/en/14-reference/03-connector/09-csharp.mdx b/docs/en/14-reference/03-connector/09-csharp.mdx
index c745b8dd1a..823e907599 100644
--- a/docs/en/14-reference/03-connector/09-csharp.mdx
+++ b/docs/en/14-reference/03-connector/09-csharp.mdx
@@ -172,7 +172,6 @@ namespace TDengineExample
`Taos` is an ADO.NET connector for TDengine, supporting Linux and Windows platforms. Community contributor `Maikebing@@maikebing contributes the connector`. Please refer to:
* Interface download:
-* Usage notes:
## Frequently Asked Questions
diff --git a/docs/en/14-reference/03-connector/_preparation.mdx b/docs/en/14-reference/03-connector/_preparation.mdx
index 07ebdbca3d..c6e42ce023 100644
--- a/docs/en/14-reference/03-connector/_preparation.mdx
+++ b/docs/en/14-reference/03-connector/_preparation.mdx
@@ -2,7 +2,7 @@
:::info
-Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding [Windows client](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) for TDengine.
+Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding Windows client, please refer to [Install TDengine](../../get-started/package).
- libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately.
- taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately.
diff --git a/docs/en/14-reference/06-taosdump.md b/docs/en/14-reference/06-taosdump.md
index 2105ba83fa..e73441a96b 100644
--- a/docs/en/14-reference/06-taosdump.md
+++ b/docs/en/14-reference/06-taosdump.md
@@ -116,5 +116,4 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
Mandatory or optional arguments to long options are also mandatory or optional
for any corresponding short options.
-Report bugs to .
```
diff --git a/docs/en/14-reference/07-tdinsight/index.md b/docs/en/14-reference/07-tdinsight/index.md
index e74c9de7b2..2e56203525 100644
--- a/docs/en/14-reference/07-tdinsight/index.md
+++ b/docs/en/14-reference/07-tdinsight/index.md
@@ -263,7 +263,7 @@ Once the import is complete, the full page view of TDinsight is shown below.
## TDinsight dashboard details
-The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources [dnodes, mnodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster) or databases.
+The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources [dnodes, mnodes, vnodes](../../taos-sql/node/) or databases.
Details of the metrics are as follows.
diff --git a/docs/en/14-reference/11-docker/index.md b/docs/en/14-reference/11-docker/index.md
index be1d72ff9c..7cd1e810dc 100644
--- a/docs/en/14-reference/11-docker/index.md
+++ b/docs/en/14-reference/11-docker/index.md
@@ -116,7 +116,7 @@ If you want to start your application in a container, you need to add the corres
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -217,7 +217,7 @@ Here is the full Dockerfile:
```docker
FROM golang:1.17.6-buster as builder
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -233,7 +233,7 @@ RUN go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md
index 9cb2ef5125..02921c3f6a 100644
--- a/docs/en/14-reference/12-config/index.md
+++ b/docs/en/14-reference/12-config/index.md
@@ -380,6 +380,35 @@ The charset that takes effect is UTF-8.
| Unit | bytes |
| Value Range | 0: always compress; >0: only compress when the size of any column data exceeds the threshold; -1: always uncompress |
| Default Value | -1 |
+| Default Value | -1 |
+| Note          | available from version 2.3.0.0 |
+
+## Continuous Query Parameters
+
+### minSlidingTime
+
+| Attribute | Description |
+| ------------- | -------------------------------------------------------- |
+| Applicable | Server Only |
+| Meaning | Minimum sliding time of time window |
+| Unit | millisecond or microsecond , depending on time precision |
+| Value Range | 10-1000000 |
+| Default Value | 10 |
+
+### minIntervalTime
+
+| Attribute | Description |
+| ------------- | --------------------------- |
+| Applicable | Server Only |
+| Meaning | Minimum size of time window |
+| Unit | millisecond |
+| Value Range | 1-1000000 |
+| Default Value | 10 |
+
+:::info
+To prevent system resource from being exhausted by multiple concurrent streams, a random delay is applied on each stream automatically. `maxFirstStreamCompDelay` is the maximum delay time before a continuous query is started the first time. `streamCompDelayRatio` is the ratio for calculating delay time, with the size of the time window as base. `maxStreamCompDelay` is the maximum delay time. The actual delay time is a random time not bigger than `maxStreamCompDelay`. If a continuous query fails, `retryStreamComDelay` is the delay time before retrying it, also not bigger than `maxStreamCompDelay`.
+
+:::
## Log Parameters
diff --git a/docs/en/14-reference/13-schemaless/13-schemaless.md b/docs/en/14-reference/13-schemaless/13-schemaless.md
index 816ebe0047..4f50c38cbb 100644
--- a/docs/en/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/en/14-reference/13-schemaless/13-schemaless.md
@@ -1,6 +1,6 @@
---
title: Schemaless Writing
-description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface."
+description: 'The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface.'
---
In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing
@@ -25,7 +25,7 @@ where:
- measurement will be used as the data table name. It will be separated from tag_set by a comma.
- `tag_set` will be used as tags, with format like `=,=` Enter a space between `tag_set` and `field_set`.
- `field_set`will be used as data columns, with format like `=,=` Enter a space between `field_set` and `timestamp`.
-- `timestamp` is the primary key timestamp corresponding to this row of data
+- `timestamp` is the primary key timestamp corresponding to this row of data
All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes (").
@@ -36,14 +36,14 @@ In the schemaless writing data line protocol, each data item in the field_set ne
- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII character)
- Numeric types will be distinguished from data types by the suffix.
-| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
-| -------- | -------- | ------------ | -------------- |
-| 1 | None or f64 | double | 8 |
-| 2 | f32 | float | 4 |
-| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
-| 4 | i16/u16 | SmallInt/USmallInt | 2 |
-| 5 | i32/u32 | Int/UInt | 4 |
-| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
+| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
+| ----------------- | ----------- | ----------------------------- | ---------------- |
+| 1 | None or f64 | double | 8 |
+| 2 | f32 | float | 4 |
+| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
+| 4 | i16/u16 | SmallInt/USmallInt | 2 |
+| 5 | i32/u32 | Int/UInt | 4 |
+| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
@@ -61,7 +61,7 @@ Note that if the wrong case is used when describing the data type suffix, or if
Schemaless writes process row data according to the following principles.
-1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the next string:
+1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the next string:
```json
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
@@ -82,7 +82,7 @@ You can configure smlChildTableName to specify table names, for example, `smlChi
:::tip
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
-16KB. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+16KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
:::
@@ -90,23 +90,23 @@ All processing logic of schemaless will still follow TDengine's underlying restr
Three specified modes are supported in the schemaless writing process, as follows:
-| **Serial** | **Value** | **Description** |
-| -------- | ------------------- | ------------------------------- |
-| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
-| 2 | SML_TELNET_PROTOCOL | OpenTSDB file protocol |
-| 3 | SML_JSON_PROTOCOL | OpenTSDB JSON protocol |
+| **Serial** | **Value** | **Description** |
+| ---------- | ------------------- | ---------------------- |
+| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
+| 2 | SML_TELNET_PROTOCOL | OpenTSDB file protocol |
+| 3 | SML_JSON_PROTOCOL | OpenTSDB JSON protocol |
In InfluxDB line protocol mode, you must specify the precision of the input timestamp. Valid precisions are described in the following table.
-| **No.** | **Precision** | **Description** |
-| -------- | --------------------------------- | -------------- |
-| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) |
-| 2 | TSDB_SML_TIMESTAMP_HOURS | Hours |
-| 3 | TSDB_SML_TIMESTAMP_MINUTES | Minutes |
-| 4 | TSDB_SML_TIMESTAMP_SECONDS | Seconds |
-| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | Milliseconds |
-| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | Microseconds |
-| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | Nanoseconds |
+| **No.** | **Precision** | **Description** |
+| ------- | --------------------------------- | --------------------- |
+| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) |
+| 2 | TSDB_SML_TIMESTAMP_HOURS | Hours |
+| 3 | TSDB_SML_TIMESTAMP_MINUTES | Minutes |
+| 4 | TSDB_SML_TIMESTAMP_SECONDS | Seconds |
+| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | Milliseconds |
+| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | Microseconds |
+| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | Nanoseconds |
In OpenTSDB file and JSON protocol modes, the precision of the timestamp is determined from its length in the standard OpenTSDB manner. User input is ignored.
diff --git a/docs/en/14-reference/14-taosKeeper.md b/docs/en/14-reference/14-taosKeeper.md
index 476b5a1fd2..665bc75380 100644
--- a/docs/en/14-reference/14-taosKeeper.md
+++ b/docs/en/14-reference/14-taosKeeper.md
@@ -1,7 +1,7 @@
---
sidebar_label: taosKeeper
title: taosKeeper
-description: Instructions and tips for using taosKeeper
+description: taosKeeper exports TDengine monitoring metrics.
---
## Introduction
@@ -22,26 +22,35 @@ You can compile taosKeeper separately and install it. Please refer to the [taosK
### Configuration and running methods
-
-taosKeeper needs to be executed on the terminal of the operating system. To run taosKeeper, see [configuration file](#configuration-file-parameters-in-detail).
+taosKeeper needs to be executed on the terminal of the operating system. It supports three configuration methods: [Command-line arguments](#command-line-arguments-in-detail), [environment variables](#environment-variable-in-detail) and [configuration file](#configuration-file-parameters-in-detail). Command-line arguments take precedence over environment variables, which take precedence over the configuration file.
**Make sure that the TDengine cluster is running correctly before running taosKeeper. ** Ensure that the monitoring service in TDengine has been started. For more information, see [TDengine Monitoring Configuration](../config/#monitoring).
-
+### Environment variable
+
+You can use environment variables to run taosKeeper and control its behavior:
+
+```shell
+$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
+
+$ taoskeeper
+```
+
+You can run `taoskeeper -h` for more details.
+
### Configuration File
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used.
```shell
-taoskeeper -c
+$ taoskeeper -c
```
**Sample configuration files**
@@ -110,7 +119,7 @@ Query OK, 1 rows in database (0.036162s)
#### Export Monitoring Metrics
```shell
-curl http://127.0.0.1:6043/metrics
+$ curl http://127.0.0.1:6043/metrics
```
Sample result set (excerpt):
diff --git a/docs/en/20-third-party/09-emq-broker.md b/docs/en/20-third-party/09-emq-broker.md
index 0900dd3d75..2ead1bbaf4 100644
--- a/docs/en/20-third-party/09-emq-broker.md
+++ b/docs/en/20-third-party/09-emq-broker.md
@@ -9,7 +9,7 @@ MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emq
The following preparations are required for EMQX to add TDengine data sources correctly.
- The TDengine cluster is deployed and working properly
-- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
+- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- If you use the emulated writers described later, you need to install the appropriate version of Node.js. V12 is recommended.
## Install and start EMQX
@@ -28,8 +28,6 @@ USE test;
CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP);
```
-Note: The table schema is based on the blog [(In Chinese) Data Transfer, Storage, Presentation, EMQX + TDengine Build MQTT IoT Data Visualization Platform](https://www.taosdata.com/blog/2020/08/04/1722.html) as an example. Subsequent operations are carried out with this blog scenario too. Please modify it according to your actual application scenario.
-
## Configuring EMQX Rules
Since the configuration interface of EMQX differs from version to version, here is v4.4.5 as an example. For other versions, please refer to the corresponding official documentation.
@@ -137,5 +135,5 @@ Use the TDengine CLI program to log in and query the appropriate databases and t

-Please refer to the [TDengine official documentation](https://docs.taosdata.com/) for more details on how to use TDengine.
+Please refer to the [TDengine official documentation](https://docs.tdengine.com/) for more details on how to use TDengine.
EMQX Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use EMQX.
diff --git a/docs/en/20-third-party/12-google-data-studio.md b/docs/en/20-third-party/12-google-data-studio.md
new file mode 100644
index 0000000000..fc94f98056
--- /dev/null
+++ b/docs/en/20-third-party/12-google-data-studio.md
@@ -0,0 +1,36 @@
+---
+sidebar_label: Google Data Studio
+title: Use Google Data Studio to access TDengine
+---
+
+Data Studio is a powerful tool for reporting and visualization, offering a wide variety of charts and connectors and making it easy to generate reports based on predefined templates. Its ease of use and robust ecosystem have made it one of the first choices for people working in data analysis.
+
+TDengine is a high-performance, scalable time-series database that supports SQL. Many businesses and developers in fields spanning from IoT and Industry Internet to IT and finance are using TDengine as their time-series database management solution.
+
+The TDengine team immediately saw the benefits of combining TDengine with Data Studio to analyze time-series data, and they got to work to create a connector for Data Studio.
+
+With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”.
+
+
+
+Select the TDengine connector and click Authorize.
+
+
+
+Then sign in to your Google Account and click Allow to enable the connection to TDengine.
+
+
+
+In the Enter URL field, type the hostname and port of the server running the TDengine REST service. In the following fields, type your username, password, database name, table name, and the start and end times of your query range. Then, click Connect.
+
+
+
+After the connection is established, you can use Data Studio to process your data and create reports.
+
+
+
+In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data – some examples are shown below.
+
+
+
+With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we’re sure you’ll be able to gain new insights and obtain even more value from your data.
diff --git a/docs/en/20-third-party/gds/gds-01.webp b/docs/en/20-third-party/gds/gds-01.webp
new file mode 100644
index 0000000000..2e5f9e4ff5
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-01.webp differ
diff --git a/docs/en/20-third-party/gds/gds-02.png.webp b/docs/en/20-third-party/gds/gds-02.png.webp
new file mode 100644
index 0000000000..3b3537f5a4
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-02.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-03.png.webp b/docs/en/20-third-party/gds/gds-03.png.webp
new file mode 100644
index 0000000000..5719436d5b
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-03.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-04.png.webp b/docs/en/20-third-party/gds/gds-04.png.webp
new file mode 100644
index 0000000000..ddaae5c1a6
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-04.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-05.png.webp b/docs/en/20-third-party/gds/gds-05.png.webp
new file mode 100644
index 0000000000..9a917678fc
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-05.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-06.png.webp b/docs/en/20-third-party/gds/gds-06.png.webp
new file mode 100644
index 0000000000..c76b68d32b
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-06.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-07.png.webp b/docs/en/20-third-party/gds/gds-07.png.webp
new file mode 100644
index 0000000000..1386ae9c4d
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-07.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-08.png.webp b/docs/en/20-third-party/gds/gds-08.png.webp
new file mode 100644
index 0000000000..59dcf8b31d
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-08.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-09.png.webp b/docs/en/20-third-party/gds/gds-09.png.webp
new file mode 100644
index 0000000000..b94439f211
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-09.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-10.png.webp b/docs/en/20-third-party/gds/gds-10.png.webp
new file mode 100644
index 0000000000..a63cad9e9a
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-10.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-11.png.webp b/docs/en/20-third-party/gds/gds-11.png.webp
new file mode 100644
index 0000000000..fc38cd9a29
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-11.png.webp differ
diff --git a/docs/en/21-tdinternal/01-arch.md b/docs/en/21-tdinternal/01-arch.md
index 44651c0496..2f876adffc 100644
--- a/docs/en/21-tdinternal/01-arch.md
+++ b/docs/en/21-tdinternal/01-arch.md
@@ -12,6 +12,7 @@ The design of TDengine is based on the assumption that any hardware or software
Logical structure diagram of TDengine's distributed architecture is as follows:

+
Figure 1: TDengine architecture diagram
A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine client driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit.
@@ -38,15 +39,16 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
**Cluster external connection**: TDengine cluster can accommodate a single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted.
-**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode:
+**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode:
-1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step;
+1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step;
2. Check the system configuration file taos.cfg to obtain node configuration parameters “firstEp” and “secondEp” (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step;
3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again.
**The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code. The server-side only has one set of execution code, taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, totally transparently and without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage.
-**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster.
+**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster.
+
- Step1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"
- Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
@@ -57,6 +59,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process.

+
Figure 2: Typical process of TDengine
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
@@ -121,16 +124,17 @@ The load balancing process does not require any manual intervention, and it is t
If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Leader and all others are slaves. When the application writes a new record to system, only the Leader vnode can accept the writing request. If a follower vnode receives a writing request, the system will notifies TAOSC to redirect.
-### Leader vnode Writing Process
+### Leader vnode Writing Process
Leader Vnode uses a writing process as follows:

+
Figure 3: TDengine Leader writing process
1. Leader vnode receives the application data insertion request, verifies, and moves to next step;
2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file;
-3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
+3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
4. Write into memory and add the record to “skip list”;
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
@@ -140,6 +144,7 @@ Leader Vnode uses a writing process as follows:
For a follower vnode, the write process as follows:

+
Figure 4: TDengine Follower Writing Process
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
@@ -212,6 +217,7 @@ When data is written to disk, the system decides whether to compress the data bas
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
dataDir format is as follows:
+
```
dataDir data_path [tier_level]
```
@@ -270,6 +276,7 @@ For the data collected by device D1001, the number of records per hour is counte
TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable (super table). STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tag. There can be multiple tags which can be added, deleted and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under a STABLE by specifying tag filters. This greatly simplifies the development of applications. The process is shown in the following figure:

+
Figure 5: Diagram of multi-table aggregation query
1. Application sends a query condition to system;
@@ -279,9 +286,8 @@ TDengine creates a separate table for each data collection point, but in practic
5. Each vnode first finds the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to TAOSC;
6. TAOSC finally aggregates the results returned by multiple data nodes and send them back to application.
-Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details.
+Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TDengine SQL for details.
### Precomputation
In order to effectively improve the performance of query processing, based-on the unchangeable feature of IoT data, statistical information of data stored in data block is recorded in the head of data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are directly used, and no need to read the data block contents at all. Since the amount of pre-calculated data is much smaller than the actual size of data block stored on disk, for query processing with disk IO as bottleneck, the use of pre-calculated results can greatly reduce the pressure of reading IO and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) of PostgreSQL.
-
diff --git a/docs/en/25-application/03-immigrate.md b/docs/en/25-application/03-immigrate.md
index 9614574c71..1aabaa43e7 100644
--- a/docs/en/25-application/03-immigrate.md
+++ b/docs/en/25-application/03-immigrate.md
@@ -41,7 +41,7 @@ The agents deployed in the application nodes are responsible for providing opera
- **TDengine installation and deployment**
-First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to the blog ["Installation and Uninstallation of TDengine Multiple Installation Packages"](https://www.taosdata.com/blog/2019/08/09/566.html).
+First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to [Install TDengine](../../get-started/package)
Note that once the installation is complete, do not start the `taosd` service before properly configuring the parameters.
@@ -51,7 +51,7 @@ TDengine version 2.4 and later version includes `taosAdapter`. taosAdapter is a
Users can flexibly deploy taosAdapter instances, based on their requirements, to improve data writing throughput and provide guarantees for data writes in different application scenarios.
-Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/).
+Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](../../reference/taosadapter/).
If using collectd, modify the configuration file in its default location `/etc/collectd/collectd.conf` to point to the IP address and port of the node where to deploy taosAdapter. For example, assuming the taosAdapter IP address is 192.168.1.130 and port 6046, configure it as follows.
@@ -411,7 +411,7 @@ TDengine provides a wealth of help documents to explain many aspects of cluster
### Cluster Deployment
-The first is TDengine installation. Download the latest stable version of TDengine from the official website, and install it. Please refer to the blog ["Installation and Uninstallation of Various Installation Packages of TDengine"](https://www.taosdata.com/blog/2019/08/09/566.html) for the various installation package formats.
+The first is TDengine installation. Download the latest stable version of TDengine from the official website, and install it. Please refer to [Install TDengine](../../get-started/package) for more details.
Note that once the installation is complete, do not immediately start the `taosd` service, but start it after correctly configuring the parameters.
diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md
index 79d5424ac2..f9127121f3 100644
--- a/docs/zh/01-index.md
+++ b/docs/zh/01-index.md
@@ -4,22 +4,22 @@ sidebar_label: 文档首页
slug: /
---
-TDengine是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time Series Database , TSDB ), 它专为物联网、工业互联网、金融等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。
+TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time Series Database , TSDB ), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。
-TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用TDengine, 无论如何,请您仔细阅读[基本概念](./concept)一章。
+TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论如何,请您仔细阅读[基本概念](./concept)一章。
-如果你是开发者,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要把示例代码拷贝粘贴,针对自己的应用稍作改动,就能跑起来。
+如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。
-我们已经生活在大数据的时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请参考[部署集群](./deployment)一章。
+我们已经生活在大数据时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 Database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请仔细参考[部署集群](./deployment)一章。
-TDengine 采用 SQL 作为其查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。
+TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。
-如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出,配置参数,怎么监测 TDengine 是否健康运行,怎么提升系统运行的性能,那么请仔细参考[运维指南](./operation)一章。
+如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。
-如果你对 TDengine 外围工具,REST API, 各种编程语言的连接器想做更多详细了解,请看[参考指南](./reference)一章。
+如果你对 TDengine 的外围工具、REST API、各种编程语言的连接器(Connector)想做更多详细了解,请看[参考指南](./reference)一章。
-如果你对 TDengine 内部的架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。
+如果你对 TDengine 的内部架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。
-最后,作为一个开源软件,欢迎大家的参与。如果发现文档的任何错误,描述不清晰的地方,都请在每个页面的最下方,点击“编辑本文档“直接进行修改。
+最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。
Together, we make a difference!
diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md
index f6779b8776..012c49d2c3 100644
--- a/docs/zh/02-intro.md
+++ b/docs/zh/02-intro.md
@@ -4,72 +4,95 @@ description: 简要介绍 TDengine 的主要功能
toc_max_heading_level: 2
---
-TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/tdengine/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time Series Database , TSDB )。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。
+TDengine 是一款开源、高性能、云原生的[时序数据库](https://tdengine.com/tsdb/),且针对物联网、车联网、工业互联网、金融、IT 运维等场景进行了优化。TDengine 的代码,包括集群功能,都在 GNU AGPL v3.0 下开源。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等其它功能以降低系统复杂度及研发和运维成本。
-本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对TDengine有个整体的了解。
+本章节介绍 TDengine 的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine 有个整体的了解。
## 主要功能
-TDengine的主要功能如下:
+TDengine 的主要功能如下:
-1. 高速数据写入,除 [SQL 写入](../develop/insert-data/sql-writing)外,还支持 [Schemaless 写入](../reference/schemaless/),支持 [InfluxDB LINE 协议](../develop/insert-data/influxdb-line),[OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](../develop/insert-data/opentsdb-json)等协议写入;
-2. 第三方数据采集工具 [Telegraf](../third-party/telegraf),[Prometheus](../third-party/prometheus),[StatsD](../third-party/statsd),[collectd](../third-party/collectd),[icinga2](../third-party/icinga2), [TCollector](../third-party/tcollector), [EMQ](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker) 等都可以进行配置后,不用任何代码,即可将数据写入;
-3. 支持[各种查询](../develop/query-data),包括聚合查询、嵌套查询、降采样查询、插值等
-4. 支持[用户自定义函数](../develop/udf)
-5. 支持[缓存](../develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis
-6. 支持[流式计算](../develop/stream)(Stream Processing)
-7. 支持[数据订阅](../develop/tmq),而且可以指定过滤条件
-8. 支持[集群](../deployment/),可以通过多节点进行水平扩展,并通过多副本实现高可靠
-9. 提供[命令行程序](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
-10. 提供多种数据的[导入](../operation/import)、[导出](../operation/export)
-11. 支持对[TDengine 集群本身的监控](../operation/monitor)
-12. 提供各种语言的[连接器](../connector): 如 C/C++, Java, Go, Node.JS, Rust, Python, C# 等
-13. 支持 [REST 接口](../connector/rest-api/)
-14. 支持与[ Grafana 无缝集成](../third-party/grafana)
-15. 支持与 Google Data Studio 无缝集成
-16. 支持 [Kubernetes 部署](../deployment/k8s)
+1. 写入数据,支持
+ - [SQL 写入](../develop/insert-data/sql-writing)
+ - [无模式(Schemaless)写入](../reference/schemaless/),支持多种标准写入协议
+ - [InfluxDB Line 协议](../develop/insert-data/influxdb-line)
+ - [OpenTSDB Telnet 协议](../develop/insert-data/opentsdb-telnet)
+ - [OpenTSDB JSON 协议](../develop/insert-data/opentsdb-json)
+ - 与多种第三方工具的无缝集成,它们都可以仅通过配置而无需任何代码即可将数据写入 TDengine
+ - [Telegraf](../third-party/telegraf)
+ - [Prometheus](../third-party/prometheus)
+ - [StatsD](../third-party/statsd)
+ - [collectd](../third-party/collectd)
+ - [Icinga2](../third-party/icinga2)
+ - [TCollector](../third-party/tcollector)
+ - [EMQX](../third-party/emq-broker)
+ - [HiveMQ](../third-party/hive-mq-broker)
+2. 查询数据,支持
+ - [标准 SQL](../taos-sql),含嵌套查询
+ - [时序数据特色函数](../taos-sql/function/#time-series-extensions)
+ - [时序数据特色查询](../taos-sql/distinguished),例如降采样、插值、累加和、时间加权平均、状态窗口、会话窗口等
+ - [用户自定义函数(UDF)](../taos-sql/udf)
+3. [缓存](../develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis 就能对时序数据进行高效处理
+4. [流式计算(Stream Processing)](../develop/stream),TDengine 不仅支持连续查询,还支持事件驱动的流式计算,这样在处理时序数据时就无需 Flink 或 Spark 这样流式计算组件
+5. [数据订阅](../develop/tmq),应用程序可以订阅一张表或一组表的数据,提供与 Kafka 相同的 API,而且可以指定过滤条件
+6. 可视化
+ - 支持与 [Grafana](../third-party/grafana/) 的无缝集成
+ - 支持与 Google Data Studio 的无缝集成
+7. 集群
+ - [集群部署](../deployment/),可以通过增加节点进行水平扩展以提升处理能力
+ - 可以通过 [Kubernetes 部署 TDengine](../deployment/k8s/)
+ - 通过多副本提供高可用能力
+8. 管理
+ - [监控](../operation/monitor)运行中的 TDengine 实例
+ - 多种[数据导入](../operation/import)方式
+ - 多种[数据导出](../operation/export)方式
+9. 工具
+ - 提供[交互式命令行程序(CLI)](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
+ - 提供压力测试工具[taosBenchmark](../reference/taosbenchmark),用于测试 TDengine 的性能
+10. 编程
+ - 提供各种语言的[连接器(Connector)](../connector): 如 [C/C++](../connector/cpp)、[Java](../connector/java)、[Go](../connector/go)、[Node.js](../connector/node)、[Rust](../connector/rust)、[Python](../connector/python)、[C#](../connector/csharp) 等
+ - 支持 [REST 接口](../connector/rest-api/)
-更多细小的功能,请阅读整个文档。
+更多细节功能,请阅读整个文档。
## 竞争优势
-由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html),比如结构化、无需事务、很少删除或更新、写多读少等等,设计了全新的针对时序数据的存储引擎和计算引擎,因此与其他时序数据库相比,TDengine 有以下特点:
+由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html),比如结构化、无需事务、很少删除或更新、写多读少等等,因此与其他时序数据库相比,TDengine 有以下特点:
-- **[高性能](https://www.taosdata.com/tdengine/fast)**:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,存储空间不及通用数据库的1/10。
+- **[高性能](https://www.taosdata.com/tdengine/fast)**:TDengine 是唯一一个解决了时序数据存储的高基数难题的时序数据库,支持上亿数据采集点,并在数据插入、查询和数据压缩上远胜其它时序数据库。
-- **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**:通过原生分布式的设计,充分利用云平台的优势,TDengine 提供了水平扩展能力,具备弹性、韧性和可观测性,支持k8s部署,可运行在公有云、私有云和混合云上。
+- **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**:TDengine 内建缓存、流式计算和数据订阅等功能,为时序数据的处理提供了极简的解决方案,从而大幅降低了业务系统的设计复杂度和运维成本。
-- **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**:TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。
+- **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**:通过原生的分布式设计、数据分片和分区、存算分离、RAFT 协议、Kubernetes 部署和完整的可观测性,TDengine 是一款云原生时序数据库并且能够部署在公有云、私有云和混合云上。
-- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:支持 SQL,同时为时序数据特有的分析提供SQL扩展。通过超级表、存储计算分离、分区分片、预计算、自定义函数等技术,TDengine 具备强大的分析能力。
+- **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**:对系统管理员来说,TDengine 大幅降低了管理和维护的代价。对开发者来说, TDengine 提供了简单的接口、极简的解决方案和与第三方工具的无缝集成。对数据分析专家来说,TDengine 提供了便捷的数据访问能力。
-- **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**:无任何依赖,安装、集群几秒搞定;提供REST以及各种语言连接器,与众多第三方工具无缝集成;提供命令行程序,便于管理和即席查询;提供各种运维工具。
+- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:通过超级表、存储计算分离、分区分片、预计算和其它技术,TDengine 能够高效地浏览、格式化和访问数据。
-- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部开源,截止到2022年8月1日,全球超过 135.9k 个运行实例,GitHub Star 18.7k,Fork 4.4k,社区活跃。
+- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部在开源协议下公开。全球超过 140k 个运行实例,GitHub Star 19k,且拥有一个活跃的开发者社区。
采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面:
-1. 由于其超强性能,它能将系统需要的计算资源和存储资源大幅降低
+1. 由于其超强性能,它能将系统所需的计算资源和存储资源大幅降低
2. 因为支持 SQL,能与众多第三方软件无缝集成,学习迁移成本大幅下降
-3. 因为是一极简的时序数据平台,系统复杂度、研发和运营成本大幅降低
-4. 因为维护简单,运营维护成本能大幅降低
+3. 因为是一款极简的时序数据平台,系统复杂度、研发和运营成本大幅降低
## 技术生态
-在整个时序大数据平台中,TDengine 在其中扮演的角色如下:
+在整个时序大数据平台中,TDengine 扮演的角色如下:

+图 1. TDengine技术生态图
-图 1. TDengine技术生态图
-上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka, 他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序 (CLI) 以及可视化管理管理。
+上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka,他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序(CLI)以及可视化管理工具。
-## 总体适用场景
+## 典型适用场景
-作为一个高性能、分布式、支持 SQL 的时序数据库 (Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。本文对适用场景做更多详细的分析。
+作为一个高性能、分布式、支持 SQL 的时序数据库(Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因其充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。
### 数据源特点和需求
@@ -91,18 +114,18 @@ TDengine的主要功能如下:
### 系统功能需求
-| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
-| -------------------------- | ------ | -------- | -------- | --------------------------------------------------------------------------------------------------------------------- |
-| 要求完整的内置数据处理算法 | | √ | | TDengine 的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。 |
-| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据系统处理,或者应该考虑 TDengine 和关系型数据系统配合实现系统功能。 |
+| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| -------------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------------------------- |
+| 要求完整的内置数据处理算法 | | √ | | TDengine 实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有需求,因此特殊类型的处理需求还需要在应用层面解决。 |
+| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据库处理,或者应该考虑 TDengine 和关系型数据库配合实现系统功能。 |
### 系统性能需求
-| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
-| ---------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------ |
-| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 |
-| 要求高速处理数据 | | | √ | TDengine 的专门为 IoT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 |
-| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 |
+| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| ---------------------- | ------ | -------- | -------- | -------------------------------------------------------------------------------------------------- |
+| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 |
+| 要求高速处理数据 | | | √ | TDengine 专门为 IoT 优化的存储和数据处理设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 |
+| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 |
### 系统维护需求
diff --git a/docs/zh/07-develop/02-model/index.mdx b/docs/zh/07-develop/02-model/index.mdx
index 634c8a98d4..d66059c2cd 100644
--- a/docs/zh/07-develop/02-model/index.mdx
+++ b/docs/zh/07-develop/02-model/index.mdx
@@ -41,7 +41,7 @@ USE power;
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
```
-与创建普通表一样,创建超级表时,需要提供表名(示例中为 meters),表结构 Schema,即数据列的定义。第一列必须为时间戳(示例中为 ts),其他列为采集的物理量(示例中为 current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的 schema (示例中为 location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组 ID、管理员 ID 等等。标签的 schema 可以事后增加、删除、修改。具体定义以及细节请见 [TAOS SQL 的超级表管理](/taos-sql/stable) 章节。
+与创建普通表一样,创建超级表时,需要提供表名(示例中为 meters),表结构 Schema,即数据列的定义。第一列必须为时间戳(示例中为 ts),其他列为采集的物理量(示例中为 current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的 schema (示例中为 location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组 ID、管理员 ID 等等。标签的 schema 可以事后增加、删除、修改。具体定义以及细节请见 [TDengine SQL 的超级表管理](/taos-sql/stable) 章节。
每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。对于电网,我们就需要对智能电表、变压器、母线、开关等都建立一个超级表。在物联网中,一个设备就可能有多个数据采集点(比如一台风力发电的风机,有的采集点采集电流、电压等电参数,有的采集点采集温度、湿度、风向等环境参数),这个时候,对这一类型的设备,需要建立多张超级表。
@@ -55,7 +55,7 @@ TDengine 对每个数据采集点需要独立建表。与标准的关系型数
CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
```
-其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 "California.SanFrancisco",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](/taos-sql/table) 章节。
+其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 "California.SanFrancisco",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TDengine SQL 的表管理](/taos-sql/table) 章节。
TDengine 建议将数据采集点的全局唯一 ID 作为表名(比如设备序列号)。但对于有的场景,并没有唯一的 ID,可以将多个 ID 组合成一个唯一的 ID。不建议将具有唯一性的 ID 作为标签值。
diff --git a/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx b/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx
index 2920fa35a4..8818eaae3d 100644
--- a/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx
@@ -26,6 +26,7 @@ import PhpStmt from "./_php_stmt.mdx";
应用通过连接器执行 INSERT 语句来插入数据,用户还可以通过 TDengine CLI,手动输入 INSERT 语句插入数据。
### 一次写入一条
+
下面这条 INSERT 就将一条记录写入到表 d1001 中:
```sql
@@ -48,7 +49,7 @@ TDengine 也支持一次向多个表写入数据,比如下面这条命令就
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
```
-详细的 SQL INSERT 语法规则参考 [TAOS SQL 的数据写入](/taos-sql/insert)。
+详细的 SQL INSERT 语法规则参考 [TDengine SQL 的数据写入](/taos-sql/insert)。
:::info
@@ -134,4 +135,3 @@ TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 类似,这
-
diff --git a/docs/zh/07-develop/03-insert-data/05-high-volume.md b/docs/zh/07-develop/03-insert-data/05-high-volume.md
index 32be8cb890..d7581467ae 100644
--- a/docs/zh/07-develop/03-insert-data/05-high-volume.md
+++ b/docs/zh/07-develop/03-insert-data/05-high-volume.md
@@ -11,7 +11,7 @@ import TabItem from "@theme/TabItem";
从客户端程序的角度来说,高效写入数据要考虑以下几个因素:
-1. 单次写入的数据量。一般来讲,每批次写入的数据量越大越高效(但超过一定阈值其优势会消失)。使用 SQL 写入 TDengine 时,尽量在一条 SQL 中拼接更多数据。目前,TDengine 支持的一条 SQL 的最大长度为 1,048,576(1M)个字符。可通过配置客户端参数 maxSQLLength(默认值为 65480)进行修改。
+1. 单次写入的数据量。一般来讲,每批次写入的数据量越大越高效(但超过一定阈值其优势会消失)。使用 SQL 写入 TDengine 时,尽量在一条 SQL 中拼接更多数据。目前,TDengine 支持的一条 SQL 的最大长度为 1,048,576(1M)个字符。
2. 并发连接数。一般来讲,同时写入数据的并发连接数越多写入越高效(但超过一定阈值反而会下降,取决于服务端处理能力)。
3. 数据在不同表(或子表)之间的分布,即要写入数据的相邻性。一般来说,每批次只向同一张表(或子表)写入数据比向多张表(或子表)写入数据要更高效;
4. 写入方式。一般来讲:
@@ -38,13 +38,9 @@ import TabItem from "@theme/TabItem";
### 服务器配置的角度 {#setting-view}
-从服务器配置的角度来说,也有很多优化写入性能的方法。
+从服务端配置的角度,要根据系统中磁盘的数量,磁盘的 I/O 能力,以及处理器能力在创建数据库时设置适当的 vgroups 数量以充分发挥系统性能。如果 vgroups 过少,则系统性能无法发挥;如果 vgroups 过多,会造成无谓的资源竞争。常规推荐 vgroups 数量为 CPU 核数的 2 倍,但仍然要结合具体的系统资源配置进行调优。
-如果总表数不多(远小于核数乘以1000), 且无论怎么调节客户端程序,taosd 进程的 CPU 使用率都很低,那么很可能是因为表在各个 vgroup 分布不均。比如:数据库总表数是 1000 且 minTablesPerVnode 设置的也是 1000,那么所有的表都会分布在 1 个 vgroup 上。此时如果将 minTablesPerVnode 和 tablelncStepPerVnode 都设置成 100, 则可将表分布至 10 个 vgroup。(假设 maxVgroupsPerDb 大于等于 10)。
-
-如果总表数比较大(比如大于500万),适当增加 maxVgroupsPerDb 也能显著提高建表的速度。maxVgroupsPerDb 默认值为 0, 自动配置为 CPU 的核数。 如果表的数量巨大,也建议调节 maxTablesPerVnode 参数,以免超过单个 vnode 建表的上限。
-
-更多调优参数,请参考 [配置参考](../../../reference/config)部分。
+更多调优参数,请参考 [数据库管理](../../../taos-sql/database) 和 [服务端配置](../../../reference/config)。
## 高效写入示例 {#sample-code}
@@ -352,7 +348,7 @@ main 函数可以接收 5 个启动参数,依次是:
-SQLWriter 类封装了拼 SQL 和写数据的逻辑。所有的表都没有提前创建,而是在发生表不存在错误的时候,再以超级表为模板批量建表,然后重新执行 INSERT 语句。对于其它错误会记录当时执行的 SQL, 以便排查错误和故障恢复。这个类也对 SQL 是否超过最大长度限制做了检查,如果接近 SQL 最大长度限制(maxSQLLength),将会立即执行 SQL。为了减少 SQL 此时,建议将 maxSQLLength 适当调大。
+SQLWriter 类封装了拼 SQL 和写数据的逻辑。所有的表都没有提前创建,而是在发生表不存在错误的时候,再以超级表为模板批量建表,然后重新执行 INSERT 语句。对于其它错误会记录当时执行的 SQL,以便排查错误和故障恢复。这个类也对 SQL 是否超过最大长度限制做了检查,根据 TDengine 3.0 的限制由输入参数 maxSQLLength 传入了支持的最大 SQL 长度,即 1048576。
SQLWriter
diff --git a/docs/zh/07-develop/04-query-data/index.mdx b/docs/zh/07-develop/04-query-data/index.mdx
index 92cb1906d9..d6156c8a59 100644
--- a/docs/zh/07-develop/04-query-data/index.mdx
+++ b/docs/zh/07-develop/04-query-data/index.mdx
@@ -44,7 +44,7 @@ Query OK, 2 row(s) in set (0.001100s)
为满足物联网场景的需求,TDengine 支持几个特殊的函数,比如 twa(时间加权平均),spread (最大值与最小值的差),last_row(最后一条记录)等,更多与物联网场景相关的函数将添加进来。
-具体的查询语法请看 [TAOS SQL 的数据查询](../../taos-sql/select) 章节。
+具体的查询语法请看 [TDengine SQL 的数据查询](../../taos-sql/select) 章节。
## 多表聚合查询
@@ -75,7 +75,7 @@ taos> SELECT count(*), max(current) FROM meters where groupId = 2;
Query OK, 1 row(s) in set (0.002136s)
```
-在 [TAOS SQL 的数据查询](../../taos-sql/select) 一章,查询类操作都会注明是否支持超级表。
+在 [TDengine SQL 的数据查询](../../taos-sql/select) 一章,查询类操作都会注明是否支持超级表。
## 降采样查询、插值
@@ -123,7 +123,7 @@ Query OK, 6 rows in database (0.005515s)
如果一个时间间隔里,没有采集的数据,TDengine 还提供插值计算的功能。
-语法规则细节请见 [TAOS SQL 的按时间窗口切分聚合](../../taos-sql/distinguished) 章节。
+语法规则细节请见 [TDengine SQL 的按时间窗口切分聚合](../../taos-sql/distinguished) 章节。
## 示例代码
diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md
index 0c305231e0..d8ff3f04ed 100644
--- a/docs/zh/12-taos-sql/06-select.md
+++ b/docs/zh/12-taos-sql/06-select.md
@@ -355,19 +355,15 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
:::info
-- 目前仅支持一层嵌套,也即不能在子查询中再嵌入子查询。
-- 内层查询的返回结果将作为“虚拟表”供外层查询使用,此虚拟表可以使用 AS 语法做重命名,以便于外层查询中方便引用。
-- 目前不能在“连续查询”功能中使用子查询。
+- 内层查询的返回结果将作为“虚拟表”供外层查询使用,此虚拟表建议起别名,以便于外层查询中方便引用。
- 在内层和外层查询中,都支持普通的表间/超级表间 JOIN。内层查询的计算结果也可以再参与数据子表的 JOIN 操作。
-- 目前内层查询、外层查询均不支持 UNION 操作。
- 内层查询支持的功能特性与非嵌套的查询语句能力是一致的。
- 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。
- 与非嵌套的查询语句相比,外层查询所能支持的功能特性存在如下限制:
- 计算函数部分:
- - 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。
- - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。
- - 外层查询中不支持 IN 算子,但在内层中可以使用。
- - 外层查询不支持 GROUP BY。
+ - 如果内层查询的结果数据未提供时间戳,那么计算过程隐式依赖时间戳的函数在外层会无法正常工作。例如:INTERP, DERIVATIVE, IRATE, LAST_ROW, FIRST, LAST, TWA, STATEDURATION, TAIL, UNIQUE。
+ - 如果内层查询的结果数据不是有效的时间序列,那么计算过程依赖数据为时间序列的函数在外层会无法正常工作。例如:LEASTSQUARES, ELAPSED, INTERP, DERIVATIVE, IRATE, TWA, DIFF, STATECOUNT, STATEDURATION, CSUM, MAVG, TAIL, UNIQUE。
+ - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:PERCENTILE。
:::
diff --git a/docs/zh/12-taos-sql/12-distinguished.md b/docs/zh/12-taos-sql/12-distinguished.md
index b9e06033d6..016c1929fe 100644
--- a/docs/zh/12-taos-sql/12-distinguished.md
+++ b/docs/zh/12-taos-sql/12-distinguished.md
@@ -6,11 +6,11 @@ description: TDengine 提供的时序数据特有的查询功能
TDengine 是专为时序数据而研发的大数据平台,存储和计算都针对时序数据的特定进行了量身定制,在支持标准 SQL 的基础之上,还提供了一系列贴合时序业务场景的特色查询语法,极大的方便时序场景的应用开发。
-TDengine 提供的特色查询包括标签切分查询和窗口切分查询。
+TDengine 提供的特色查询包括数据切分查询和窗口切分查询。
-## 标签切分查询
+## 数据切分查询
-超级表查询中,当需要针对标签进行数据切分然后在切分出的数据空间内再进行一系列的计算时使用标签切分子句,标签切分的语句如下:
+当需要按一定的维度对数据进行切分然后在切分出的数据空间内再进行一系列的计算时使用数据切分子句,数据切分语句的语法如下:
```sql
PARTITION BY part_list
@@ -18,22 +18,23 @@ PARTITION BY part_list
part_list 可以是任意的标量表达式,包括列、常量、标量函数和它们的组合。
-当 PARTITION BY 和标签一起使用时,TDengine 按如下方式处理标签切分子句:
+TDengine 按如下方式处理数据切分子句:
-- 标签切分子句位于 WHERE 子句之后,且不能和 JOIN 子句一起使用。
-- 标签切分子句将超级表数据按指定的标签组合进行切分,每个切分的分片进行指定的计算。计算由之后的子句定义(窗口子句、GROUP BY 子句或 SELECT 子句)。
-- 标签切分子句可以和窗口切分子句(或 GROUP BY 子句)一起使用,此时后面的子句作用在每个切分的分片上。例如,将数据按标签 location 进行分组,并对每个组按 10 分钟进行降采样,取其最大值。
+- 数据切分子句位于 WHERE 子句之后。
+- 数据切分子句将表数据按指定的维度进行切分,每个切分的分片进行指定的计算。计算由之后的子句定义(窗口子句、GROUP BY 子句或 SELECT 子句)。
+- 数据切分子句可以和窗口切分子句(或 GROUP BY 子句)一起使用,此时后面的子句作用在每个切分的分片上。例如,将数据按标签 location 进行分组,并对每个组按 10 分钟进行降采样,取其最大值。
```sql
select max(current) from meters partition by location interval(10m)
```
+数据切分子句最常见的用法就是在超级表查询中,按标签将子表数据进行切分,然后分别进行计算。特别是 PARTITION BY TBNAME 用法,它将每个子表的数据独立出来,形成一条条独立的时间序列,极大的方便了各种时序场景的统计分析。
## 窗口切分查询
TDengine 支持按时间段窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。窗口子句用于针对查询的数据集合按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)三种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。窗口切分查询语法如下:
```sql
-SELECT function_list FROM tb_name
+SELECT select_list FROM tb_name
[WHERE where_condition]
[SESSION(ts_col, tol_val)]
[STATE_WINDOW(col)]
@@ -43,19 +44,15 @@ SELECT function_list FROM tb_name
在上述语法中的具体限制如下
-### 窗口切分查询中使用函数的限制
-
-- 在聚合查询中,function_list 位置允许使用聚合和选择函数,并要求每个函数仅输出单个结果(例如:COUNT、AVG、SUM、STDDEV、LEASTSQUARES、PERCENTILE、MIN、MAX、FIRST、LAST),而不能使用具有多行输出结果的函数(例如:DIFF 以及四则运算)。
-- 此外 LAST_ROW 查询也不能与窗口聚合同时出现。
-- 标量函数(如:CEIL/FLOOR 等)也不能使用在窗口聚合查询中。
-
### 窗口子句的规则
-- 窗口子句位于标签切分子句之后,GROUP BY 子句之前,且不可以和 GROUP BY 子句一起使用。
+- 窗口子句位于数据切分子句之后,GROUP BY 子句之前,且不可以和 GROUP BY 子句一起使用。
- 窗口子句将数据按窗口进行切分,对每个窗口进行 SELECT 列表中的表达式的计算,SELECT 列表中的表达式只能包含:
- 常量。
- - 聚集函数。
+ - \_wstart 伪列、\_wend 伪列和 \_wduration 伪列。
+ - 聚集函数(包括选择函数和可以由参数确定输出行数的时序特有函数)。
- 包含上面表达式的表达式。
+ - 且至少包含一个聚集函数。
- 窗口子句不可以和 GROUP BY 子句一起使用。
- WHERE 语句可以指定查询的起止时间和其他过滤条件。
@@ -74,7 +71,7 @@ FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填
1. 使用 FILL 语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过 1 千万条具有插值的结果。
2. 在时间维度聚合中,返回的结果中时间序列严格单调递增。
-3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 GROUP BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 GROUP BY 语句分组,则返回结果中每个 GROUP 内不按照时间序列严格单调递增。
+3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 PARTITION BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 PARTITION BY 语句分组,则返回结果中每个 PARTITION 内不按照时间序列严格单调递增。
:::
@@ -106,7 +103,7 @@ SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
### 状态窗口
-使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。(状态窗口暂不支持对超级表使用)
+使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。

@@ -122,7 +119,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);

-在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。(会话窗口暂不支持对超级表使用)
+在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。
```
diff --git a/docs/zh/12-taos-sql/25-grant.md b/docs/zh/12-taos-sql/25-grant.md
index 6f7024d32e..7fb9447101 100644
--- a/docs/zh/12-taos-sql/25-grant.md
+++ b/docs/zh/12-taos-sql/25-grant.md
@@ -9,14 +9,51 @@ description: 企业版中才具有的权限管理功能
## 创建用户
```sql
-CREATE USER use_name PASS 'password';
+CREATE USER use_name PASS 'password' [SYSINFO {1|0}];
```
创建用户。
-use_name最长为23字节。
+use_name 最长为 23 字节。
-password最长为128字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
+password 最长为 128 字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
+
+SYSINFO 表示用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息(如 DNODE、QNODE 等)、存储相关的信息等。默认为可以查看系统信息。
+
+例如,创建密码为 123456 且可以查看系统信息的用户 test 如下:
+
+```sql
+taos> create user test pass '123456' sysinfo 1;
+Query OK, 0 of 0 rows affected (0.001254s)
+```
+
+## 查看用户
+
+```sql
+SHOW USERS;
+```
+
+查看用户信息。
+
+```sql
+taos> show users;
+ name | super | enable | sysinfo | create_time |
+================================================================================
+ test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
+ root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
+Query OK, 2 rows in database (0.001657s)
+```
+
+也可以通过查询 INFORMATION_SCHEMA.INS_USERS 系统表来查看用户信息,例如:
+
+```sql
+taos> select * from information_schema.ins_users;
+ name | super | enable | sysinfo | create_time |
+================================================================================
+ test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
+ root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
+Query OK, 2 rows in database (0.001953s)
+```
## 删除用户
@@ -37,9 +74,15 @@ alter_user_clause: {
```
- PASS:修改用户密码。
-- ENABLE:修改用户是否启用。1表示启用此用户,0表示禁用此用户。
-- SYSINFO:修改用户是否可查看系统信息。1表示可以查看系统信息,0表示不可以查看系统信息。
+- ENABLE:修改用户是否启用。1 表示启用此用户,0 表示禁用此用户。
+- SYSINFO:修改用户是否可查看系统信息。1 表示可以查看系统信息,0 表示不可以查看系统信息。
+例如,禁用 test 用户:
+
+```sql
+taos> alter user test enable 0;
+Query OK, 0 of 0 rows affected (0.001160s)
+```
## 授权
@@ -62,7 +105,7 @@ priv_level : {
}
```
-对用户授权。
+对用户授权。授权功能只包含在企业版中。
授权级别支持到DATABASE,权限有READ和WRITE两种。
@@ -92,4 +135,4 @@ priv_level : {
```
-收回对用户的授权。
+收回对用户的授权。授权功能只包含在企业版中。
diff --git a/docs/zh/12-taos-sql/index.md b/docs/zh/12-taos-sql/index.md
index 821679551c..739d26b224 100644
--- a/docs/zh/12-taos-sql/index.md
+++ b/docs/zh/12-taos-sql/index.md
@@ -1,11 +1,11 @@
---
-title: TAOS SQL
-description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容"
+title: TDengine SQL
+description: 'TDengine SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容'
---
-本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。TDengine 3.0 版本相比 2.x 版本做了大量改进和优化,特别是查询引擎进行了彻底的重构,因此 SQL 语法相比 2.x 版本有很多变更。详细的变更内容请见 [3.0 版本语法变更](/taos-sql/changes) 章节
+本文档说明 TDengine SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。TDengine 3.0 版本相比 2.x 版本做了大量改进和优化,特别是查询引擎进行了彻底的重构,因此 SQL 语法相比 2.x 版本有很多变更。详细的变更内容请见 [3.0 版本语法变更](/taos-sql/changes) 章节
-TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TAOS SQL 语句的最大长度为 1M。TAOS SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。
+TDengine SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TDengine SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TDengine SQL 语句的最大长度为 1M。TDengine SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。
本章节 SQL 语法遵循如下约定:
diff --git a/docs/zh/14-reference/11-docker/index.md b/docs/zh/14-reference/11-docker/index.md
index d712e9aba8..58bbe1e117 100644
--- a/docs/zh/14-reference/11-docker/index.md
+++ b/docs/zh/14-reference/11-docker/index.md
@@ -119,7 +119,7 @@ taos -h tdengine -P 6030
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -234,7 +234,7 @@ go mod tidy
```dockerfile
FROM golang:1.19.0-buster as builder
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -250,7 +250,7 @@ RUN go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
diff --git a/docs/zh/14-reference/13-schemaless/13-schemaless.md b/docs/zh/14-reference/13-schemaless/13-schemaless.md
index ae4280e26a..a33abafaf8 100644
--- a/docs/zh/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/zh/14-reference/13-schemaless/13-schemaless.md
@@ -3,7 +3,7 @@ title: Schemaless 写入
description: 'Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构'
---
-在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless
+在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless
将自动增加必要的数据列,保证用户写入的数据可以被正确存储。
无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,你也可以通过,SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。
@@ -36,14 +36,14 @@ tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要
- 对空格、等号(=)、逗号(,)、双引号("),前面需要使用反斜杠(\)进行转义。(都指的是英文半角符号)
- 数值类型将通过后缀来区分数据类型:
-| **序号** | **后缀** | **映射类型** | **大小(字节)** |
-| -------- | -------- | ------------ | -------------- |
-| 1 | 无或 f64 | double | 8 |
-| 2 | f32 | float | 4 |
-| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
-| 4 | i16/u16 | SmallInt/USmallInt | 2 |
-| 5 | i32/u32 | Int/UInt | 4 |
-| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
+| **序号** | **后缀** | **映射类型** | **大小(字节)** |
+| -------- | ----------- | ----------------------------- | -------------- |
+| 1 | 无或 f64 | double | 8 |
+| 2 | f32 | float | 4 |
+| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
+| 4 | i16/u16 | SmallInt/USmallInt | 2 |
+| 5 | i32/u32 | Int/UInt | 4 |
+| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
- t, T, true, True, TRUE, f, F, false, False 将直接作为 BOOL 型来处理。
@@ -69,7 +69,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
需要注意的是,这里的 tag_key1, tag_key2 并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。
排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t_” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀。
-为了让用户可以指定生成的表名,可以通过配置smlChildTableName来指定(比如 配置smlChildTableName=tname 插入数据为st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为cpu1,注意如果多行数据tname相同,但是后面的tag_set不同,则使用第一次自动建表时指定的tag_set,其他的会忽略)。
+为了让用户可以指定生成的表名,可以通过配置 smlChildTableName 来指定(比如 配置 smlChildTableName=tname 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一次自动建表时指定的 tag_set,其他的会忽略)。
2. 如果解析行协议获得的超级表不存在,则会创建这个超级表(不建议手动创建超级表,不然插入数据可能异常)。
3. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。
@@ -78,11 +78,11 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
NULL。
6. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。
7. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。
-8. 为了提高写入的效率,默认假设同一个超级表中field_set的顺序是一样的(第一条数据包含所有的field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数smlDataFormat为false,否则,数据写入按照相同顺序写入,库中数据会异常。
+8. 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常。
:::tip
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
-16KB。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit)
+16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit)
:::
diff --git a/docs/zh/14-reference/14-taosKeeper.md b/docs/zh/14-reference/14-taosKeeper.md
index f1165c9d0f..ae0a496f03 100644
--- a/docs/zh/14-reference/14-taosKeeper.md
+++ b/docs/zh/14-reference/14-taosKeeper.md
@@ -1,7 +1,7 @@
---
sidebar_label: taosKeeper
title: taosKeeper
-description: TDengine taosKeeper 使用说明
+description: TDengine 3.0 版本监控指标的导出工具
---
## 简介
@@ -22,26 +22,36 @@ taosKeeper 安装方式:
### 配置和运行方式
-
-taosKeeper 需要在操作系统终端执行,该工具支持 [配置文件启动](#配置文件启动)。
+taosKeeper 需要在操作系统终端执行,该工具支持三种配置方式:[命令行参数](#命令行参数启动)、[环境变量](#环境变量启动) 和 [配置文件](#配置文件启动)。优先级为:命令行参数、环境变量、配置文件参数。
**在运行 taosKeeper 之前要确保 TDengine 集群与 taosAdapter 已经在正确运行。** 并且 TDengine 已经开启监控服务,具体请参考:[TDengine 监控配置](../config/#监控相关)。
-
+
+### 环境变量启动
+
+通过设置环境变量达到控制启动参数的目的,通常在容器中运行时使用。
+
+```shell
+$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
+
+$ taoskeeper
+```
+
+具体参数列表请参照 `taoskeeper -h` 输入结果。
+
### 配置文件启动
执行以下命令即可快速体验 taosKeeper。当不指定 taosKeeper 配置文件时,优先使用 `/etc/taos/keeper.toml` 配置,否则将使用默认配置。
```shell
-taoskeeper -c
+$ taoskeeper -c
```
**下面是配置文件的示例:**
@@ -110,7 +120,7 @@ Query OK, 1 rows in database (0.036162s)
#### 导出监控指标
```shell
-curl http://127.0.0.1:6043/metrics
+$ curl http://127.0.0.1:6043/metrics
```
部分结果集:
diff --git a/docs/zh/20-third-party/12-google-data-studio.md b/docs/zh/20-third-party/12-google-data-studio.md
new file mode 100644
index 0000000000..bc06f0ea32
--- /dev/null
+++ b/docs/zh/20-third-party/12-google-data-studio.md
@@ -0,0 +1,39 @@
+---
+sidebar_label: Google Data Studio
+title: TDengine Google Data Studio Connector
+description: 使用 Google Data Studio 存取 TDengine 数据的详细指南
+---
+
+Google Data Studio 是一个强大的报表可视化工具,它提供了丰富的数据图表和数据连接,可以非常方便地按照既定模板生成报表。因其简便易用和生态丰富而在数据分析领域得到一众数据科学家的青睐。
+
+Data Studio 可以支持多种数据来源,除了诸如 Google Analytics、Google AdWords、Search Console、BigQuery 等 Google 自己的服务之外,用户也可以直接将离线文件上传至 Google Cloud Storage,或是通过连接器来接入其它数据源。
+
+
+
+目前 TDengine 连接器已经发布到 Google Data Studio 应用商店,你可以在 “Connect to Data” 页面下直接搜索 TDengine,将其选作数据源。
+
+
+
+接下来选择 AUTHORIZE 按钮。
+
+
+
+设置允许连接自己的账号到外部服务。
+
+
+
+在接下来的页面选择运行 TDengine REST 服务的 URL,并输入用户名、密码、数据库名称、表名称以及查询时间范围,并点击右上角的 CONNECT 按钮。
+
+
+
+连接成功后,就可以使用 GDS 方便地进行数据处理并创建报表了。
+
+
+
+目前的维度和指标规则是:timestamp 类型的字段和 tag 字段会被连接器定义为维度,而其他类型的字段是指标。用户还可以根据自己的需求创建不同的表。
+
+
+
+
+
+
diff --git a/docs/zh/20-third-party/gds/gds-01.webp b/docs/zh/20-third-party/gds/gds-01.webp
new file mode 100644
index 0000000000..2e5f9e4ff5
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-01.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-02.png.webp b/docs/zh/20-third-party/gds/gds-02.png.webp
new file mode 100644
index 0000000000..3b3537f5a4
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-02.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-03.png.webp b/docs/zh/20-third-party/gds/gds-03.png.webp
new file mode 100644
index 0000000000..5719436d5b
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-03.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-04.png.webp b/docs/zh/20-third-party/gds/gds-04.png.webp
new file mode 100644
index 0000000000..ddaae5c1a6
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-04.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-05.png.webp b/docs/zh/20-third-party/gds/gds-05.png.webp
new file mode 100644
index 0000000000..9a917678fc
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-05.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-06.png.webp b/docs/zh/20-third-party/gds/gds-06.png.webp
new file mode 100644
index 0000000000..c76b68d32b
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-06.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-07.png.webp b/docs/zh/20-third-party/gds/gds-07.png.webp
new file mode 100644
index 0000000000..1386ae9c4d
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-07.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-08.png.webp b/docs/zh/20-third-party/gds/gds-08.png.webp
new file mode 100644
index 0000000000..59dcf8b31d
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-08.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-09.png.webp b/docs/zh/20-third-party/gds/gds-09.png.webp
new file mode 100644
index 0000000000..b94439f211
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-09.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-10.png.webp b/docs/zh/20-third-party/gds/gds-10.png.webp
new file mode 100644
index 0000000000..a63cad9e9a
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-10.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-11.png.webp b/docs/zh/20-third-party/gds/gds-11.png.webp
new file mode 100644
index 0000000000..fc38cd9a29
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-11.png.webp differ
diff --git a/docs/zh/21-tdinternal/01-arch.md b/docs/zh/21-tdinternal/01-arch.md
index d74366d129..704524fd21 100644
--- a/docs/zh/21-tdinternal/01-arch.md
+++ b/docs/zh/21-tdinternal/01-arch.md
@@ -288,7 +288,7 @@ TDengine 对每个数据采集点单独建表,但在实际应用中经常需
7. vnode 返回本节点的查询计算结果;
8. qnode 完成多节点数据聚合后将最终查询结果返回给客户端;
-由于 TDengine 在 vnode 内将标签数据与时序数据分离存储,通过在内存里过滤标签数据,先找到需要参与聚合操作的表的集合,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个 vnode/dnode,聚合计算操作在多个 vnode 里并发进行,又进一步提升了聚合的速度。 对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TAOS SQL。
+由于 TDengine 在 vnode 内将标签数据与时序数据分离存储,通过在内存里过滤标签数据,先找到需要参与聚合操作的表的集合,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个 vnode/dnode,聚合计算操作在多个 vnode 里并发进行,又进一步提升了聚合的速度。 对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TDengine SQL。
### 预计算
diff --git a/examples/c/CMakeLists.txt b/examples/c/CMakeLists.txt
index 9d06dbac6d..4a9007acec 100644
--- a/examples/c/CMakeLists.txt
+++ b/examples/c/CMakeLists.txt
@@ -13,15 +13,9 @@ IF (TD_LINUX)
#TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)
add_executable(tmq "")
- add_executable(tmq_taosx "")
add_executable(stream_demo "")
add_executable(demoapi "")
- target_sources(tmq_taosx
- PRIVATE
- "tmq_taosx.c"
- )
-
target_sources(tmq
PRIVATE
"tmq.c"
@@ -41,10 +35,6 @@ IF (TD_LINUX)
taos_static
)
- target_link_libraries(tmq_taosx
- taos_static
- )
-
target_link_libraries(stream_demo
taos_static
)
@@ -57,10 +47,6 @@ IF (TD_LINUX)
PUBLIC "${TD_SOURCE_DIR}/include/os"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
- target_include_directories(tmq_taosx
- PUBLIC "${TD_SOURCE_DIR}/include/os"
- PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
- )
target_include_directories(stream_demo
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
@@ -73,7 +59,6 @@ IF (TD_LINUX)
)
SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
- SET_TARGET_PROPERTIES(tmq_taosx PROPERTIES OUTPUT_NAME tmq_taosx)
SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo)
SET_TARGET_PROPERTIES(demoapi PROPERTIES OUTPUT_NAME demoapi)
ENDIF ()
diff --git a/examples/c/tmq_taosx.c b/examples/c/tmq_taosx.c
deleted file mode 100644
index 491eda1ddb..0000000000
--- a/examples/c/tmq_taosx.c
+++ /dev/null
@@ -1,489 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#include
-#include
-#include
-#include
-#include
-#include "taos.h"
-
-static int running = 1;
-
-static TAOS* use_db(){
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return NULL;
- }
-
- TAOS_RES* pRes = taos_query(pConn, "use db_taosx");
- if (taos_errno(pRes) != 0) {
- printf("error in use db_taosx, reason:%s\n", taos_errstr(pRes));
- return NULL;
- }
- taos_free_result(pRes);
- return pConn;
-}
-
-static void msg_process(TAOS_RES* msg) {
- /*memset(buf, 0, 1024);*/
- printf("-----------topic-------------: %s\n", tmq_get_topic_name(msg));
- printf("db: %s\n", tmq_get_db_name(msg));
- printf("vg: %d\n", tmq_get_vgroup_id(msg));
- TAOS *pConn = use_db();
- if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
- char* result = tmq_get_json_meta(msg);
- if (result) {
- printf("meta result: %s\n", result);
- }
- tmq_free_json_meta(result);
- }
-
- tmq_raw_data raw = {0};
- tmq_get_raw(msg, &raw);
- int32_t ret = tmq_write_raw(pConn, raw);
- printf("write raw data: %s\n", tmq_err2str(ret));
-
-// else{
-// while(1){
-// int numOfRows = 0;
-// void *pData = NULL;
-// taos_fetch_raw_block(msg, &numOfRows, &pData);
-// if(numOfRows == 0) break;
-// printf("write data: tbname:%s, numOfRows:%d\n", tmq_get_table_name(msg), numOfRows);
-// int ret = taos_write_raw_block(pConn, numOfRows, pData, tmq_get_table_name(msg));
-// printf("write raw data: %s\n", tmq_err2str(ret));
-// }
-// }
-
- taos_close(pConn);
-}
-
-int32_t init_env() {
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return -1;
- }
-
- TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx");
- if (taos_errno(pRes) != 0) {
- printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create database if not exists db_taosx vgroups 4");
- if (taos_errno(pRes) != 0) {
- printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop database if exists abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in drop db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create database if not exists abc1 vgroups 3");
- if (taos_errno(pRes) != 0) {
- printf("error in create db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn,
- "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
- "nchar(8), t4 bool)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000, \"ttt\", true)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct0 values(1626006833600, 1, 2, 'a')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct1 using st1(t1) tags(2000)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct2 using st1(t1) tags(NULL)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct2, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct1 values(1626006833600, 3, 4, 'b')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct3 using st1(t1) tags(3000)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct4 using st1(t3) tags('ct4')");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct4, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833602, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 add column c4 bigint");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 modify column c3 binary(64)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 values(1626006833605, 53, 63, 'cffffffffffffffffffffffffffff', 8989898899999) (1626006833609, 51, 62, 'c333', 940)");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 select * from ct1");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 add tag t2 binary(64)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table ct3 set tag t1=5000");
- if (taos_errno(pRes) != 0) {
- printf("failed to slter child table ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "delete from abc1 .ct3 where ts < 1626006833606");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table ct3 ct1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table st1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))");
- if (taos_errno(pRes) != 0) {
- printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 add column c3 bigint");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 modify column c2 nchar(8)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 rename column c3 cc3");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 comment 'hello'");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 drop column c1");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into n1 values(now, 'eeee', 8989898899999) (now+9s, 'c333', 940)");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table n1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt1 using jt tags('{\"k1\":1, \"k2\":\"hello\"}')");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt2 using jt tags('')");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt2, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn,
- "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
- "nchar(8), t4 bool)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table st1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- taos_close(pConn);
- return 0;
-}
-
-int32_t create_topic() {
- printf("create topic\n");
- TAOS_RES* pRes;
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return -1;
- }
-
- pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
- if (taos_errno(pRes) != 0) {
- printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- taos_close(pConn);
- return 0;
-}
-
-void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
- printf("commit %d tmq %p param %p\n", code, tmq, param);
-}
-
-tmq_t* build_consumer() {
-#if 0
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- assert(pConn != NULL);
-
- TAOS_RES* pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- }
- taos_free_result(pRes);
-#endif
-
- tmq_conf_t* conf = tmq_conf_new();
- tmq_conf_set(conf, "group.id", "tg2");
- tmq_conf_set(conf, "client.id", "my app 1");
- tmq_conf_set(conf, "td.connect.user", "root");
- tmq_conf_set(conf, "td.connect.pass", "taosdata");
- tmq_conf_set(conf, "msg.with.table.name", "true");
- tmq_conf_set(conf, "enable.auto.commit", "true");
- tmq_conf_set(conf, "experimental.snapshot.enable", "true");
-
-
- /*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/
-
- tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
- tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
- assert(tmq);
- tmq_conf_destroy(conf);
- return tmq;
-}
-
-tmq_list_t* build_topic_list() {
- tmq_list_t* topic_list = tmq_list_new();
- tmq_list_append(topic_list, "topic_ctb_column");
- /*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/
- return topic_list;
-}
-
-void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
- printf("subscribe err\n");
- return;
- }
- int32_t cnt = 0;
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
- if (tmqmessage) {
- cnt++;
- msg_process(tmqmessage);
- /*if (cnt >= 2) break;*/
- /*printf("get data\n");*/
- taos_free_result(tmqmessage);
- /*} else {*/
- /*break;*/
- /*tmq_commit_sync(tmq, NULL);*/
- }
- }
-
- code = tmq_consumer_close(tmq);
- if (code)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
-void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
- static const int MIN_COMMIT_COUNT = 1;
-
- int msg_count = 0;
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
- return;
- }
-
- tmq_list_t* subList = NULL;
- tmq_subscription(tmq, &subList);
- char** subTopics = tmq_list_to_c_array(subList);
- int32_t sz = tmq_list_get_size(subList);
- printf("subscribed topics: ");
- for (int32_t i = 0; i < sz; i++) {
- printf("%s, ", subTopics[i]);
- }
- printf("\n");
- tmq_list_destroy(subList);
-
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
- if (tmqmessage) {
- msg_process(tmqmessage);
- taos_free_result(tmqmessage);
-
- /*tmq_commit_sync(tmq, NULL);*/
- /*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/
- }
- }
-
- code = tmq_consumer_close(tmq);
- if (code)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
-int main(int argc, char* argv[]) {
- printf("env init\n");
- if (init_env() < 0) {
- return -1;
- }
- create_topic();
-
- tmq_t* tmq = build_consumer();
- tmq_list_t* topic_list = build_topic_list();
- basic_consume_loop(tmq, topic_list);
- /*sync_consume_loop(tmq, topic_list);*/
-}
diff --git a/include/client/taos.h b/include/client/taos.h
index f260b84f4a..49cfbb52b8 100644
--- a/include/client/taos.h
+++ b/include/client/taos.h
@@ -254,6 +254,7 @@ enum tmq_res_t {
TMQ_RES_INVALID = -1,
TMQ_RES_DATA = 1,
TMQ_RES_TABLE_META = 2,
+ TMQ_RES_TAOSX = 3,
};
typedef struct tmq_raw_data {
diff --git a/include/common/tcommon.h b/include/common/tcommon.h
index 03672f96f3..891c9ab040 100644
--- a/include/common/tcommon.h
+++ b/include/common/tcommon.h
@@ -73,6 +73,7 @@ enum {
TMQ_MSG_TYPE__POLL_RSP,
TMQ_MSG_TYPE__POLL_META_RSP,
TMQ_MSG_TYPE__EP_RSP,
+ TMQ_MSG_TYPE__TAOSX_RSP,
TMQ_MSG_TYPE__END_RSP,
};
@@ -129,7 +130,6 @@ typedef struct SDataBlockInfo {
uint32_t capacity;
// TODO: optimize and remove following
int64_t version; // used for stream, and need serialization
- int64_t ts; // used for stream, and need serialization
int32_t childId; // used for stream, do not serialize
EStreamType type; // used for stream, do not serialize
STimeWindow calWin; // used for stream, do not serialize
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index 03e15ed8e7..f72cb3d6d9 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -144,8 +144,8 @@ void taosCfgDynamicOptions(const char *option, const char *value);
struct SConfig *taosGetCfg();
-void taosSetAllDebugFlag(int32_t flag);
-void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal);
+void taosSetAllDebugFlag(int32_t flag, bool rewrite);
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
int32_t taosSetCfg(SConfig *pCfg, char *name);
void taosLocalCfgForbiddenToChange(char* name, bool* forbidden);
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 681094471a..d503592361 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -276,7 +276,6 @@ struct SSchema {
char name[TSDB_COL_NAME_LEN];
};
-
typedef struct {
char tbName[TSDB_TABLE_NAME_LEN];
char stbName[TSDB_TABLE_NAME_LEN];
@@ -295,17 +294,15 @@ typedef struct {
SSchema* pSchemas;
} STableMetaRsp;
-
-
typedef struct {
- int32_t code;
- int8_t hashMeta;
- int64_t uid;
- char* tblFName;
- int32_t numOfRows;
- int32_t affectedRows;
- int64_t sver;
- STableMetaRsp* pMeta;
+ int32_t code;
+ int8_t hashMeta;
+ int64_t uid;
+ char* tblFName;
+ int32_t numOfRows;
+ int32_t affectedRows;
+ int64_t sver;
+ STableMetaRsp* pMeta;
} SSubmitBlkRsp;
typedef struct {
@@ -320,7 +317,7 @@ typedef struct {
int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp);
int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp);
-void tFreeSSubmitBlkRsp(void* param);
+void tFreeSSubmitBlkRsp(void* param);
void tFreeSSubmitRsp(SSubmitRsp* pRsp);
#define COL_SMA_ON ((int8_t)0x1)
@@ -2049,8 +2046,8 @@ typedef struct {
STableMetaRsp* pMeta;
} SVCreateTbRsp, SVUpdateTbRsp;
-int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp);
-int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp);
+int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp);
+int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp);
void tFreeSVCreateTbRsp(void* param);
int32_t tSerializeSVCreateTbReq(void** buf, SVCreateTbReq* pReq);
@@ -2961,6 +2958,25 @@ typedef struct {
int32_t tEncodeSMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp);
int32_t tDecodeSMqDataRsp(SDecoder* pDecoder, SMqDataRsp* pRsp);
+typedef struct {
+ SMqRspHead head;
+ STqOffsetVal reqOffset;
+ STqOffsetVal rspOffset;
+ int32_t blockNum;
+ int8_t withTbName;
+ int8_t withSchema;
+ SArray* blockDataLen;
+ SArray* blockData;
+ SArray* blockTbName;
+ SArray* blockSchema;
+ int32_t createTableNum;
+ SArray* createTableLen;
+ SArray* createTableReq;
+} STaosxRsp;
+
+int32_t tEncodeSTaosxRsp(SEncoder* pEncoder, const STaosxRsp* pRsp);
+int32_t tDecodeSTaosxRsp(SDecoder* pDecoder, STaosxRsp* pRsp);
+
typedef struct {
SMqRspHead head;
char cgroup[TSDB_CGROUP_LEN];
diff --git a/include/libs/function/function.h b/include/libs/function/function.h
index c8db01625e..3f26eee86a 100644
--- a/include/libs/function/function.h
+++ b/include/libs/function/function.h
@@ -92,6 +92,8 @@ struct SResultRowEntryInfo;
//for selectivity query, the corresponding tag value is assigned if the data is qualified
typedef struct SSubsidiaryResInfo {
int16_t num;
+ int32_t rowLen;
+ char* buf; // serialize data buffer
struct SqlFunctionCtx **pCtx;
} SSubsidiaryResInfo;
@@ -118,6 +120,11 @@ typedef struct SInputColumnInfoData {
uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions.
} SInputColumnInfoData;
+typedef struct SSerializeDataHandle {
+ struct SDiskbasedBuf* pBuf;
+ int32_t currentPage;
+} SSerializeDataHandle;
+
// sql function runtime context
typedef struct SqlFunctionCtx {
SInputColumnInfoData input;
@@ -137,10 +144,9 @@ typedef struct SqlFunctionCtx {
SFuncExecFuncs fpSet;
SScalarFuncExecFuncs sfp;
struct SExprInfo *pExpr;
- struct SDiskbasedBuf *pBuf;
struct SSDataBlock *pSrcBlock;
struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity
- int32_t curBufPage;
+ SSerializeDataHandle saveHandle;
bool isStream;
char udfName[TSDB_FUNC_NAME_LEN];
diff --git a/include/os/osSemaphore.h b/include/os/osSemaphore.h
index 2a3a2e64b6..e52da96f01 100644
--- a/include/os/osSemaphore.h
+++ b/include/os/osSemaphore.h
@@ -1,74 +1,74 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#ifndef _TD_OS_SEMPHONE_H_
-#define _TD_OS_SEMPHONE_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include
-
-#if defined(_TD_DARWIN_64)
-#include
-// typedef struct tsem_s *tsem_t;
-typedef dispatch_semaphore_t tsem_t;
-
-int tsem_init(tsem_t *sem, int pshared, unsigned int value);
-int tsem_wait(tsem_t *sem);
-int tsem_timewait(tsem_t *sim, int64_t nanosecs);
-int tsem_post(tsem_t *sem);
-int tsem_destroy(tsem_t *sem);
-
-#else
-
-#define tsem_t sem_t
-#define tsem_init sem_init
-int tsem_wait(tsem_t *sem);
-int tsem_timewait(tsem_t *sim, int64_t nanosecs);
-#define tsem_post sem_post
-#define tsem_destroy sem_destroy
-
-#endif
-
-#if defined(_TD_DARWIN_64)
-// #define TdThreadRwlock TdThreadMutex
-// #define taosThreadRwlockInit(lock, NULL) taosThreadMutexInit(lock, NULL)
-// #define taosThreadRwlockDestroy(lock) taosThreadMutexDestroy(lock)
-// #define taosThreadRwlockWrlock(lock) taosThreadMutexLock(lock)
-// #define taosThreadRwlockRdlock(lock) taosThreadMutexLock(lock)
-// #define taosThreadRwlockUnlock(lock) taosThreadMutexUnlock(lock)
-
-// #define TdThreadSpinlock TdThreadMutex
-// #define taosThreadSpinInit(lock, NULL) taosThreadMutexInit(lock, NULL)
-// #define taosThreadSpinDestroy(lock) taosThreadMutexDestroy(lock)
-// #define taosThreadSpinLock(lock) taosThreadMutexLock(lock)
-// #define taosThreadSpinUnlock(lock) taosThreadMutexUnlock(lock)
-#endif
-
-bool taosCheckPthreadValid(TdThread thread);
-int64_t taosGetSelfPthreadId();
-int64_t taosGetPthreadId(TdThread thread);
-void taosResetPthread(TdThread *thread);
-bool taosComparePthread(TdThread first, TdThread second);
-int32_t taosGetPId();
-int32_t taosGetAppName(char *name, int32_t *len);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /*_TD_OS_SEMPHONE_H_*/
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef _TD_OS_SEMPHONE_H_
+#define _TD_OS_SEMPHONE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+
+#if defined(_TD_DARWIN_64)
+#include
+// typedef struct tsem_s *tsem_t;
+typedef dispatch_semaphore_t tsem_t;
+
+int tsem_init(tsem_t *sem, int pshared, unsigned int value);
+int tsem_wait(tsem_t *sem);
+int tsem_timewait(tsem_t *sim, int64_t nanosecs);
+int tsem_post(tsem_t *sem);
+int tsem_destroy(tsem_t *sem);
+
+#else
+
+#define tsem_t sem_t
+#define tsem_init sem_init
+int tsem_wait(tsem_t *sem);
+int tsem_timewait(tsem_t *sim, int64_t nanosecs);
+#define tsem_post sem_post
+#define tsem_destroy sem_destroy
+
+#endif
+
+#if defined(_TD_DARWIN_64)
+// #define TdThreadRwlock TdThreadMutex
+// #define taosThreadRwlockInit(lock, NULL) taosThreadMutexInit(lock, NULL)
+// #define taosThreadRwlockDestroy(lock) taosThreadMutexDestroy(lock)
+// #define taosThreadRwlockWrlock(lock) taosThreadMutexLock(lock)
+// #define taosThreadRwlockRdlock(lock) taosThreadMutexLock(lock)
+// #define taosThreadRwlockUnlock(lock) taosThreadMutexUnlock(lock)
+
+// #define TdThreadSpinlock TdThreadMutex
+// #define taosThreadSpinInit(lock, NULL) taosThreadMutexInit(lock, NULL)
+// #define taosThreadSpinDestroy(lock) taosThreadMutexDestroy(lock)
+// #define taosThreadSpinLock(lock) taosThreadMutexLock(lock)
+// #define taosThreadSpinUnlock(lock) taosThreadMutexUnlock(lock)
+#endif
+
+bool taosCheckPthreadValid(TdThread thread);
+int64_t taosGetSelfPthreadId();
+int64_t taosGetPthreadId(TdThread thread);
+void taosResetPthread(TdThread *thread);
+bool taosComparePthread(TdThread first, TdThread second);
+int32_t taosGetPId();
+int32_t taosGetAppName(char *name, int32_t *len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*_TD_OS_SEMPHONE_H_*/
diff --git a/include/util/tcompare.h b/include/util/tcompare.h
index cc9e8ae464..c7a3ca20f2 100644
--- a/include/util/tcompare.h
+++ b/include/util/tcompare.h
@@ -105,6 +105,97 @@ int32_t compareStrPatternNotMatch(const void *pLeft, const void *pRight);
int32_t compareWStrPatternMatch(const void *pLeft, const void *pRight);
int32_t compareWStrPatternNotMatch(const void *pLeft, const void *pRight);
+int32_t compareInt8Int16(const void *pLeft, const void *pRight);
+int32_t compareInt8Int32(const void *pLeft, const void *pRight);
+int32_t compareInt8Int64(const void *pLeft, const void *pRight);
+int32_t compareInt8Float(const void *pLeft, const void *pRight);
+int32_t compareInt8Double(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint64(const void *pLeft, const void *pRight);
+int32_t compareInt16Int8(const void *pLeft, const void *pRight);
+int32_t compareInt16Int32(const void *pLeft, const void *pRight);
+int32_t compareInt16Int64(const void *pLeft, const void *pRight);
+int32_t compareInt16Float(const void *pLeft, const void *pRight);
+int32_t compareInt16Double(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint64(const void *pLeft, const void *pRight);
+int32_t compareInt32Int8(const void *pLeft, const void *pRight);
+int32_t compareInt32Int16(const void *pLeft, const void *pRight);
+int32_t compareInt32Int64(const void *pLeft, const void *pRight);
+int32_t compareInt32Float(const void *pLeft, const void *pRight);
+int32_t compareInt32Double(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint64(const void *pLeft, const void *pRight);
+int32_t compareInt64Int8(const void *pLeft, const void *pRight);
+int32_t compareInt64Int16(const void *pLeft, const void *pRight);
+int32_t compareInt64Int32(const void *pLeft, const void *pRight);
+int32_t compareInt64Float(const void *pLeft, const void *pRight);
+int32_t compareInt64Double(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint64(const void *pLeft, const void *pRight);
+int32_t compareFloatInt8(const void *pLeft, const void *pRight);
+int32_t compareFloatInt16(const void *pLeft, const void *pRight);
+int32_t compareFloatInt32(const void *pLeft, const void *pRight);
+int32_t compareFloatInt64(const void *pLeft, const void *pRight);
+int32_t compareFloatDouble(const void *pLeft, const void *pRight);
+int32_t compareFloatUint8(const void *pLeft, const void *pRight);
+int32_t compareFloatUint16(const void *pLeft, const void *pRight);
+int32_t compareFloatUint32(const void *pLeft, const void *pRight);
+int32_t compareFloatUint64(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt8(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt16(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt32(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt64(const void *pLeft, const void *pRight);
+int32_t compareDoubleFloat(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint8(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint16(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint32(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint64(const void *pLeft, const void *pRight);
+int32_t compareUint8Int8(const void *pLeft, const void *pRight);
+int32_t compareUint8Int16(const void *pLeft, const void *pRight);
+int32_t compareUint8Int32(const void *pLeft, const void *pRight);
+int32_t compareUint8Int64(const void *pLeft, const void *pRight);
+int32_t compareUint8Float(const void *pLeft, const void *pRight);
+int32_t compareUint8Double(const void *pLeft, const void *pRight);
+int32_t compareUint8Uint16(const void *pLeft, const void *pRight);
+int32_t compareUint8Uint32(const void *pLeft, const void *pRight);
+int32_t compareUint8Uint64(const void *pLeft, const void *pRight);
+int32_t compareUint16Int8(const void *pLeft, const void *pRight);
+int32_t compareUint16Int16(const void *pLeft, const void *pRight);
+int32_t compareUint16Int32(const void *pLeft, const void *pRight);
+int32_t compareUint16Int64(const void *pLeft, const void *pRight);
+int32_t compareUint16Float(const void *pLeft, const void *pRight);
+int32_t compareUint16Double(const void *pLeft, const void *pRight);
+int32_t compareUint16Uint8(const void *pLeft, const void *pRight);
+int32_t compareUint16Uint32(const void *pLeft, const void *pRight);
+int32_t compareUint16Uint64(const void *pLeft, const void *pRight);
+int32_t compareUint32Int8(const void *pLeft, const void *pRight);
+int32_t compareUint32Int16(const void *pLeft, const void *pRight);
+int32_t compareUint32Int32(const void *pLeft, const void *pRight);
+int32_t compareUint32Int64(const void *pLeft, const void *pRight);
+int32_t compareUint32Float(const void *pLeft, const void *pRight);
+int32_t compareUint32Double(const void *pLeft, const void *pRight);
+int32_t compareUint32Uint8(const void *pLeft, const void *pRight);
+int32_t compareUint32Uint16(const void *pLeft, const void *pRight);
+int32_t compareUint32Uint64(const void *pLeft, const void *pRight);
+int32_t compareUint64Int8(const void *pLeft, const void *pRight);
+int32_t compareUint64Int16(const void *pLeft, const void *pRight);
+int32_t compareUint64Int32(const void *pLeft, const void *pRight);
+int32_t compareUint64Int64(const void *pLeft, const void *pRight);
+int32_t compareUint64Float(const void *pLeft, const void *pRight);
+int32_t compareUint64Double(const void *pLeft, const void *pRight);
+int32_t compareUint64Uint8(const void *pLeft, const void *pRight);
+int32_t compareUint64Uint16(const void *pLeft, const void *pRight);
+int32_t compareUint64Uint32(const void *pLeft, const void *pRight);
+
__compar_fn_t getComparFunc(int32_t type, int32_t optr);
__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order);
int32_t doCompare(const char *a, const char *b, int32_t type, size_t size);
diff --git a/include/util/tencode.h b/include/util/tencode.h
index ad642cd612..a6dd58297e 100644
--- a/include/util/tencode.h
+++ b/include/util/tencode.h
@@ -264,12 +264,14 @@ static FORCE_INLINE int32_t tEncodeDouble(SEncoder* pCoder, double val) {
static FORCE_INLINE int32_t tEncodeBinary(SEncoder* pCoder, const uint8_t* val, uint32_t len) {
if (tEncodeU32v(pCoder, len) < 0) return -1;
- if (pCoder->data) {
- if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1;
- memcpy(TD_CODER_CURRENT(pCoder), val, len);
- }
+ if (len) {
+ if (pCoder->data) {
+ if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1;
+ memcpy(TD_CODER_CURRENT(pCoder), val, len);
+ }
- TD_CODER_MOVE_POS(pCoder, len);
+ TD_CODER_MOVE_POS(pCoder, len);
+ }
return 0;
}
@@ -414,14 +416,18 @@ static int32_t tDecodeCStrTo(SDecoder* pCoder, char* val) {
static FORCE_INLINE int32_t tDecodeBinaryAlloc(SDecoder* pCoder, void** val, uint64_t* len) {
uint64_t length = 0;
if (tDecodeU64v(pCoder, &length) < 0) return -1;
- if (len) *len = length;
+ if (length) {
+ if (len) *len = length;
- if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
- *val = taosMemoryMalloc(length);
- if (*val == NULL) return -1;
- memcpy(*val, TD_CODER_CURRENT(pCoder), length);
+ if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
+ *val = taosMemoryMalloc(length);
+ if (*val == NULL) return -1;
+ memcpy(*val, TD_CODER_CURRENT(pCoder), length);
- TD_CODER_MOVE_POS(pCoder, length);
+ TD_CODER_MOVE_POS(pCoder, length);
+ } else {
+ *val = NULL;
+ }
return 0;
}
diff --git a/include/util/tpagedbuf.h b/include/util/tpagedbuf.h
index 57a489c0dd..9ab89273e6 100644
--- a/include/util/tpagedbuf.h
+++ b/include/util/tpagedbuf.h
@@ -58,11 +58,10 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
/**
*
* @param pBuf
- * @param groupId
* @param pageId
* @return
*/
-void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId);
+void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId);
/**
*
diff --git a/packaging/MPtestJenkinsfile b/packaging/MPtestJenkinsfile
index 1e2e69a977..45c8d8abf2 100644
--- a/packaging/MPtestJenkinsfile
+++ b/packaging/MPtestJenkinsfile
@@ -1,7 +1,7 @@
def sync_source(branch_name) {
sh '''
hostname
- env
+ ip addr|grep 192|awk '{print $2}'|sed "s/\\/.*//"
echo ''' + branch_name + '''
'''
sh '''
@@ -15,6 +15,7 @@ def sync_source(branch_name) {
cd ${TDENGINE_ROOT_DIR}
git reset --hard
git fetch || git fetch
+ rm -rf examples/rust/
git checkout ''' + branch_name + ''' -f
git branch
git pull || git pull
@@ -53,6 +54,16 @@ pipeline {
defaultValue:'3.0.0.1',
description: 'This number of baseVerison is generally not modified.Now it is 3.0.0.1'
)
+ string (
+ name:'toolsVersion',
+ defaultValue:'2.1.2',
+        description: 'This number of toolsVersion is generally not modified. Now it is 2.1.2'
+ )
+ string (
+ name:'toolsBaseVersion',
+ defaultValue:'2.1.2',
+        description: 'This number of toolsBaseVersion is generally not modified. Now it is 2.1.2'
+ )
}
environment{
WORK_DIR = '/var/lib/jenkins/workspace'
@@ -61,7 +72,7 @@ pipeline {
BRANCH_NAME = '3.0'
TD_SERVER_TAR = "TDengine-server-${version}-Linux-x64.tar.gz"
- BASE_TD_SERVER_TAR = "TDengine-server-${baseVersion}-arm64-x64.tar.gz"
+ BASE_TD_SERVER_TAR = "TDengine-server-${baseVersion}-Linux-x64.tar.gz"
TD_SERVER_ARM_TAR = "TDengine-server-${version}-Linux-arm64.tar.gz"
BASE_TD_SERVER_ARM_TAR = "TDengine-server-${baseVersion}-Linux-arm64.tar.gz"
@@ -70,7 +81,7 @@ pipeline {
BASE_TD_SERVER_LITE_TAR = "TDengine-server-${baseVersion}-Linux-x64-Lite.tar.gz"
TD_CLIENT_TAR = "TDengine-client-${version}-Linux-x64.tar.gz"
- BASE_TD_CLIENT_TAR = "TDengine-client-${baseVersion}-arm64-x64.tar.gz"
+ BASE_TD_CLIENT_TAR = "TDengine-client-${baseVersion}-Linux-x64.tar.gz"
TD_CLIENT_ARM_TAR = "TDengine-client-${version}-Linux-arm64.tar.gz"
BASE_TD_CLIENT_ARM_TAR = "TDengine-client-${baseVersion}-Linux-arm64.tar.gz"
@@ -86,31 +97,22 @@ pipeline {
TD_CLIENT_EXE = "TDengine-client-${version}-Windows-x64.exe"
+ TD_TOOLS_TAR = "taosTools-${toolsVersion}-Linux-x64.tar.gz"
+
}
stages {
stage ('RUN') {
- stage('get check package scritps'){
- agent{label 'ubuntu18'}
- steps {
- catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
- script{
- sync_source("${BRANCH_NAME}")
- }
-
- }
- }
- }
parallel {
stage('ubuntu16') {
agent{label " ubuntu16 "}
steps {
- timeout(time: 3, unit: 'MINUTES'){
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
python3 checkPackageRuning.py
- rmtaos
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
@@ -128,12 +130,12 @@ pipeline {
stage('ubuntu18') {
agent{label " ubuntu18 "}
steps {
- timeout(time: 3, unit: 'MINUTES'){
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
python3 checkPackageRuning.py
- rmtaos
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
@@ -151,12 +153,12 @@ pipeline {
stage('centos7') {
agent{label " centos7_9 "}
steps {
- timeout(time: 240, unit: 'MINUTES'){
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
python3 checkPackageRuning.py
- rmtaos
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
@@ -174,12 +176,12 @@ pipeline {
stage('centos8') {
agent{label " centos8_3 "}
steps {
- timeout(time: 240, unit: 'MINUTES'){
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
python3 checkPackageRuning.py
- rmtaos
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
diff --git a/packaging/checkPackageRuning.py b/packaging/checkPackageRuning.py
index e53cc3bdbc..c0d1e8b86c 100755
--- a/packaging/checkPackageRuning.py
+++ b/packaging/checkPackageRuning.py
@@ -22,10 +22,12 @@ import time
# install taospy
out = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
-print(out)
+print("taospy version %s "%out)
if (out == "" ):
os.system("pip install git+https://github.com/taosdata/taos-connector-python.git")
print("install taos python connector")
+else:
+ os.system("pip3 install --upgrade taospy ")
diff --git a/packaging/docker/README.md b/packaging/docker/README.md
index cb27d3bca6..763ab73724 100644
--- a/packaging/docker/README.md
+++ b/packaging/docker/README.md
@@ -158,7 +158,7 @@ When you build your application with docker, you should add the TDengine client
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -265,7 +265,7 @@ Full version of dockerfile could be:
```dockerfile
FROM golang:1.17.6-buster as builder
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -279,7 +279,7 @@ RUN go env && go mod tidy && go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh
index 173fa3a3c3..4b6264db2b 100755
--- a/packaging/testpackage.sh
+++ b/packaging/testpackage.sh
@@ -1,8 +1,5 @@
#!/bin/sh
-# function installPkgAndCheckFile{
-
-echo "Download package"
packgeName=$1
version=$2
@@ -10,6 +7,7 @@ originPackageName=$3
originversion=$4
testFile=$5
subFile="taos.tar.gz"
+password=$6
if [ ${testFile} = "server" ];then
tdPath="TDengine-server-${version}"
@@ -25,22 +23,42 @@ elif [ ${testFile} = "tools" ];then
installCmd="install-taostools.sh"
fi
+function cmdInstall {
+comd=$1
+if command -v ${comd} ;then
+ echo "${comd} is already installed"
+else
+ if command -v apt ;then
+ apt-get install ${comd} -y
+ elif command -v yum ;then
+ yum -y install ${comd}
+    echo "${comd} installed via yum"
+ fi
+fi
+}
+
+
echo "Uninstall all components of TDeingne"
if command -v rmtaos ;then
echo "uninstall all components of TDeingne:rmtaos"
- echo " "
+ rmtaos
else
echo "os doesn't include TDengine "
fi
if command -v rmtaostools ;then
echo "uninstall all components of TDeingne:rmtaostools"
- echo " "
+ rmtaostools
else
echo "os doesn't include rmtaostools "
fi
+
+cmdInstall tree
+cmdInstall wget
+cmdInstall sshpass
+
echo "new workroom path"
installPath="/usr/local/src/packageTest"
oriInstallPath="/usr/local/src/packageTest/3.1"
@@ -58,22 +76,49 @@ else
echo "${oriInstallPath} already exists"
fi
-echo "decompress installPackage"
+
+
+
+echo "download installPackage"
+# cd ${installPath}
+# wget https://www.taosdata.com/assets-download/3.0/${packgeName}
+# cd ${oriInstallPath}
+# wget https://www.taosdata.com/assets-download/3.0/${originPackageName}
cd ${installPath}
-wget https://www.taosdata.com/assets-download/3.0/${packgeName}
-cd ${oriInstallPath}
-wget https://www.taosdata.com/assets-download/3.0/${originPackageName}
-
+if [ ! -f ${packgeName} ];then
+ sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} .
+fi
+if [ ! -f debAuto.sh ];then
+ echo '#!/usr/bin/expect ' > debAuto.sh
+ echo 'set timeout 3 ' >> debAuto.sh
+  echo 'set packgeName [lindex $argv 0]' >> debAuto.sh
+ echo 'spawn dpkg -i ${packgeName}' >> debAuto.sh
+ echo 'expect "*one:"' >> debAuto.sh
+ echo 'send "\r"' >> debAuto.sh
+ echo 'expect "*skip:"' >> debAuto.sh
+ echo 'send "\r" ' >> debAuto.sh
+fi
if [[ ${packgeName} =~ "deb" ]];then
- echo "dpkg ${packgeName}" && dpkg -i ${packgeName}
+ cd ${installPath}
+ dpkg -r taostools
+ dpkg -r tdengine
+ if [[ ${packgeName} =~ "TDengine" ]];then
+ echo "./debAuto.sh ${packgeName}" && chmod 755 debAuto.sh && ./debAuto.sh ${packgeName}
+ else
+ echo "dpkg -i ${packgeName}" && dpkg -i ${packgeName}
+  fi
elif [[ ${packgeName} =~ "rpm" ]];then
- echo "rpm ${packgeName}" && rpm -ivh ${packgeName}
+ cd ${installPath}
+ echo "rpm ${packgeName}" && rpm -ivh ${packgeName} --quiet
elif [[ ${packgeName} =~ "tar" ]];then
- echo "tar ${packgeName}" && tar -xvf ${packgeName}
- cd ${oriInstallPath}
+ cd ${oriInstallPath}
+  if [ ! -f ${originPackageName} ];then
+    sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} .
+ fi
echo "tar -xvf ${originPackageName}" && tar -xvf ${originPackageName}
+
cd ${installPath}
echo "tar -xvf ${packgeName}" && tar -xvf ${packgeName}
@@ -87,10 +132,10 @@ elif [[ ${packgeName} =~ "tar" ]];then
cd ${installPath}
- tree ${oriInstallPath}/${originTdpPath} > ${originPackageName}_checkfile
- tree ${installPath}/${tdPath} > ${packgeName}_checkfile
+ tree ${oriInstallPath}/${originTdpPath} > ${oriInstallPath}/${originPackageName}_checkfile
+ tree ${installPath}/${tdPath} > ${installPath}/${packgeName}_checkfile
- diff ${packgeName}_checkfile ${originPackageName}_checkfile > ${installPath}/diffFile.log
+ diff ${installPath}/${packgeName}_checkfile ${oriInstallPath}/${originPackageName}_checkfile > ${installPath}/diffFile.log
diffNumbers=`cat ${installPath}/diffFile.log |wc -l `
if [ ${diffNumbers} != 0 ];then
echo "The number and names of files have changed from the previous installation package"
@@ -104,9 +149,21 @@ elif [[ ${packgeName} =~ "tar" ]];then
else
bash ${installCmd}
fi
+ if [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]] ;then
+ cd ${installPath}
+ sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz .
+ # wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz
+ tar xvf taosTools-2.1.2-Linux-x64.tar.gz
+ cd taosTools-2.1.2 && bash install-taostools.sh
+ elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then
+ cd ${installPath}
+ sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.deb .
+ dpkg -i taosTools-2.1.2-Linux-x64.deb
+ elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then
+ cd ${installPath}
+ sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.rpm .
+ rpm -ivh taosTools-2.1.2-Linux-x64.rpm --quiet
+ fi
-fi
-# }
-
-# installPkgAndCheckFile
+fi
diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt
index f52edbe71f..e8e3c87849 100644
--- a/source/client/CMakeLists.txt
+++ b/source/client/CMakeLists.txt
@@ -27,11 +27,18 @@ else()
INCLUDE_DIRECTORIES(jni/linux)
endif()
+set_target_properties(
+ taos
+ PROPERTIES
+ CLEAN_DIRECT_OUTPUT
+ 1
+)
+
set_target_properties(
taos
PROPERTIES
VERSION ${TD_VER_NUMBER}
- SOVERSION ${TD_VER_NUMBER}
+ SOVERSION 1
)
add_library(taos_static STATIC ${CLIENT_SRC})
diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h
index 4331da1506..b8fa9580e7 100644
--- a/source/client/inc/clientInt.h
+++ b/source/client/inc/clientInt.h
@@ -52,15 +52,17 @@ enum {
RES_TYPE__QUERY = 1,
RES_TYPE__TMQ,
RES_TYPE__TMQ_META,
+ RES_TYPE__TAOSX,
};
#define SHOW_VARIABLES_RESULT_COLS 2
#define SHOW_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
-#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
-#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ)
-#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
+#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
+#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ || *(int8_t*)res == RES_TYPE__TAOSX)
+#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
+#define TD_RES_TMQ_TAOSX(res) (*(int8_t*)res == RES_TYPE__TAOSX)
typedef struct SAppInstInfo SAppInstInfo;
@@ -200,8 +202,8 @@ typedef struct {
int32_t vgId;
SSchemaWrapper schema;
int32_t resIter;
- SMqDataRsp rsp;
SReqResultInfo resInfo;
+ SMqDataRsp rsp;
} SMqRspObj;
typedef struct {
@@ -212,6 +214,17 @@ typedef struct {
SMqMetaRsp metaRsp;
} SMqMetaRspObj;
+typedef struct {
+ int8_t resType;
+ char topic[TSDB_TOPIC_FNAME_LEN];
+ char db[TSDB_DB_FNAME_LEN];
+ int32_t vgId;
+ SSchemaWrapper schema;
+ int32_t resIter;
+ SReqResultInfo resInfo;
+ STaosxRsp rsp;
+} SMqTaosxRspObj;
+
typedef struct SRequestObj {
int8_t resType; // query or tmq
uint64_t requestId;
diff --git a/source/client/src/TMQConnector.c b/source/client/src/TMQConnector.c
index 17d3a212c4..fcf6957df9 100644
--- a/source/client/src/TMQConnector.c
+++ b/source/client/src/TMQConnector.c
@@ -42,6 +42,7 @@ void commit_cb(tmq_t *tmq, int32_t code, void *param) {
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqConfNewImp(JNIEnv *env, jobject jobj) {
tmq_conf_t *conf = tmq_conf_new();
+ jniGetGlobalMethod(env);
return (jlong)conf;
}
diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c
index 9475d1b51e..56e3527f96 100644
--- a/source/client/src/clientHb.c
+++ b/source/client/src/clientHb.c
@@ -145,7 +145,7 @@ static int32_t hbProcessStbInfoRsp(void *value, int32_t valueLen, struct SCatalo
}
static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
- SClientHbReq *pReq = taosHashGet(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey));
+ SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey));
if (NULL == pReq) {
tscWarn("pReq to get activeInfo, may be dropped, refId:%" PRIx64 ", type:%d", pRsp->connKey.tscRid,
pRsp->connKey.connType);
@@ -260,6 +260,8 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
}
}
+ taosHashRelease(pAppHbMgr->activeInfo, pReq);
+
return TSDB_CODE_SUCCESS;
}
@@ -914,10 +916,11 @@ int hbRegisterConn(SAppHbMgr *pAppHbMgr, int64_t tscRefId, int64_t clusterId, in
}
void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey) {
- SClientHbReq *pReq = taosHashGet(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
+ SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
if (pReq) {
tFreeClientHbReq(pReq);
taosHashRemove(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
+ taosHashRelease(pAppHbMgr->activeInfo, pReq);
}
if (NULL == pReq) {
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 9ceb6e0683..3086078080 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -184,6 +184,19 @@ void taos_free_result(TAOS_RES *res) {
SRequestObj *pRequest = (SRequestObj *)res;
tscDebug("0x%" PRIx64 " taos_free_result start to free query", pRequest->requestId);
destroyRequest(pRequest);
+ } else if (TD_RES_TMQ_TAOSX(res)) {
+ SMqTaosxRspObj *pRsp = (SMqTaosxRspObj *)res;
+ if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
+ if (pRsp->rsp.blockDataLen) taosArrayDestroy(pRsp->rsp.blockDataLen);
+ if (pRsp->rsp.withTbName) taosArrayDestroyP(pRsp->rsp.blockTbName, taosMemoryFree);
+ if (pRsp->rsp.withSchema) taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
+ // taosx
+ taosArrayDestroy(pRsp->rsp.createTableLen);
+ taosArrayDestroyP(pRsp->rsp.createTableReq, taosMemoryFree);
+
+ pRsp->resInfo.pRspMsg = NULL;
+ doFreeReqResultInfo(&pRsp->resInfo);
+ taosMemoryFree(pRsp);
} else if (TD_RES_TMQ(res)) {
SMqRspObj *pRsp = (SMqRspObj *)res;
if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
diff --git a/source/client/src/tmq.c b/source/client/src/tmq.c
index fa657fcb10..29d509c27c 100644
--- a/source/client/src/tmq.c
+++ b/source/client/src/tmq.c
@@ -164,6 +164,7 @@ typedef struct {
union {
SMqDataRsp dataRsp;
SMqMetaRsp metaRsp;
+ STaosxRsp taosxRsp;
};
} SMqPollRspWrapper;
@@ -1130,21 +1131,29 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tDecodeSMqDataRsp(&decoder, &pRspWrapper->dataRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead));
- } else {
- ASSERT(rspType == TMQ_MSG_TYPE__POLL_META_RSP);
+
+ tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
+ tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
+ rspType);
+
+ } else if (rspType == TMQ_MSG_TYPE__POLL_META_RSP) {
SDecoder decoder;
tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
tDecodeSMqMetaRsp(&decoder, &pRspWrapper->metaRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->metaRsp, pMsg->pData, sizeof(SMqRspHead));
+ } else if (rspType == TMQ_MSG_TYPE__TAOSX_RSP) {
+ SDecoder decoder;
+ tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
+ tDecodeSTaosxRsp(&decoder, &pRspWrapper->taosxRsp);
+ tDecoderClear(&decoder);
+ memcpy(&pRspWrapper->taosxRsp, pMsg->pData, sizeof(SMqRspHead));
+ } else {
+ ASSERT(0);
}
taosMemoryFree(pMsg->pData);
- tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
- tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
- rspType);
-
taosWriteQitem(tmq->mqueue, pRspWrapper);
tsem_post(&tmq->rspSem);
@@ -1443,6 +1452,24 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper) {
return pRspObj;
}
+SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) {
+ SMqTaosxRspObj* pRspObj = taosMemoryCalloc(1, sizeof(SMqTaosxRspObj));
+ pRspObj->resType = RES_TYPE__TAOSX;
+ tstrncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN);
+ tstrncpy(pRspObj->db, pWrapper->topicHandle->db, TSDB_DB_FNAME_LEN);
+ pRspObj->vgId = pWrapper->vgHandle->vgId;
+ pRspObj->resIter = -1;
+  memcpy(&pRspObj->rsp, &pWrapper->taosxRsp, sizeof(STaosxRsp));
+
+ pRspObj->resInfo.totalRows = 0;
+ pRspObj->resInfo.precision = TSDB_TIME_PRECISION_MILLI;
+  if (!pWrapper->taosxRsp.withSchema) {
+ setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols);
+ }
+
+ return pRspObj;
+}
+
int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
/*tscDebug("call poll");*/
for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
@@ -1595,6 +1622,30 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
pollRspWrapper->metaRsp.head.epoch, consumerEpoch);
taosFreeQitem(pollRspWrapper);
}
+ } else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__TAOSX_RSP) {
+ SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)rspWrapper;
+ /*atomic_sub_fetch_32(&tmq->readyRequest, 1);*/
+ int32_t consumerEpoch = atomic_load_32(&tmq->epoch);
+ if (pollRspWrapper->taosxRsp.head.epoch == consumerEpoch) {
+ SMqClientVg* pVg = pollRspWrapper->vgHandle;
+ /*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset,
+ * rspMsg->msg.rspOffset);*/
+ pVg->currentOffset = pollRspWrapper->taosxRsp.rspOffset;
+ atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
+ if (pollRspWrapper->taosxRsp.blockNum == 0) {
+ taosFreeQitem(pollRspWrapper);
+ rspWrapper = NULL;
+ continue;
+ }
+ // build rsp
+        SMqTaosxRspObj* pRsp = tmqBuildTaosxRspFromWrapper(pollRspWrapper);
+ taosFreeQitem(pollRspWrapper);
+ return pRsp;
+ } else {
+ tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d\n",
+ pollRspWrapper->taosxRsp.head.epoch, consumerEpoch);
+ taosFreeQitem(pollRspWrapper);
+ }
} else {
/*printf("handle ep rsp %d\n", rspMsg->head.mqMsgType);*/
bool reset = false;
@@ -1707,9 +1758,11 @@ tmq_res_t tmq_get_res_type(TAOS_RES* res) {
} else if (TD_RES_TMQ_META(res)) {
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DELETE) {
- return TMQ_RES_DATA;
+ return TMQ_RES_TAOSX;
}
return TMQ_RES_TABLE_META;
+ } else if (TD_RES_TMQ_TAOSX(res)) {
+ return TMQ_RES_TAOSX;
} else {
return TMQ_RES_INVALID;
}
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index ee9d751555..0bab6a8611 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -427,6 +427,152 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
return 0;
}
+static int32_t taosUpdateServerCfg(SConfig *pCfg) {
+ SConfigItem *pItem;
+ ECfgSrcType stype;
+ int32_t numOfCores;
+ int64_t totalMemoryKB;
+
+ pItem = cfgGetItem(tsCfg, "numOfCores");
+ if (pItem == NULL) {
+ return -1;
+ } else {
+ stype = pItem->stype;
+ numOfCores = pItem->fval;
+ }
+
+ pItem = cfgGetItem(tsCfg, "supportVnodes");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSupportVnodes = numOfCores * 2;
+ tsNumOfSupportVnodes = TMAX(tsNumOfSupportVnodes, 2);
+ pItem->i32 = tsNumOfSupportVnodes;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfRpcThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfRpcThreads = numOfCores / 2;
+ tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4);
+ pItem->i32 = tsNumOfRpcThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfCommitThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfCommitThreads = numOfCores / 2;
+ tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
+ pItem->i32 = tsNumOfCommitThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfMnodeReadThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfMnodeReadThreads = numOfCores / 8;
+ tsNumOfMnodeReadThreads = TRANGE(tsNumOfMnodeReadThreads, 1, 4);
+ pItem->i32 = tsNumOfMnodeReadThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeQueryThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeQueryThreads = numOfCores * 2;
+ tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4);
+ pItem->i32 = tsNumOfVnodeQueryThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeStreamThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeStreamThreads = numOfCores / 4;
+ tsNumOfVnodeStreamThreads = TMAX(tsNumOfVnodeStreamThreads, 4);
+ pItem->i32 = tsNumOfVnodeStreamThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeFetchThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeFetchThreads = numOfCores / 4;
+ tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4);
+ pItem->i32 = tsNumOfVnodeFetchThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeWriteThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeWriteThreads = numOfCores;
+ tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
+ pItem->i32 = tsNumOfVnodeWriteThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeSyncThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeSyncThreads = numOfCores * 2;
+ tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16);
+ pItem->i32 = tsNumOfVnodeSyncThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeRsmaThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeRsmaThreads = numOfCores;
+ tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4);
+ pItem->i32 = tsNumOfVnodeRsmaThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfQnodeQueryThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfQnodeQueryThreads = numOfCores * 2;
+ tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
+ pItem->i32 = tsNumOfQnodeQueryThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfQnodeFetchThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfQnodeFetchThreads = numOfCores / 2;
+ tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4);
+ pItem->i32 = tsNumOfQnodeFetchThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfSnodeSharedThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSnodeSharedThreads = numOfCores / 4;
+ tsNumOfSnodeSharedThreads = TRANGE(tsNumOfSnodeSharedThreads, 2, 4);
+ pItem->i32 = tsNumOfSnodeSharedThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfSnodeUniqueThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSnodeUniqueThreads = numOfCores / 4;
+ tsNumOfSnodeUniqueThreads = TRANGE(tsNumOfSnodeUniqueThreads, 2, 4);
+ pItem->i32 = tsNumOfSnodeUniqueThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "totalMemoryKB");
+ if (pItem == NULL) {
+ return -1;
+ } else {
+ stype = pItem->stype;
+ totalMemoryKB = pItem->i64;
+ }
+
+ pItem = cfgGetItem(tsCfg, "rpcQueueMemoryAllowed");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsRpcQueueMemoryAllowed = totalMemoryKB * 1024 * 0.1;
+ tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL);
+ pItem->i64 = tsRpcQueueMemoryAllowed;
+ pItem->stype = stype;
+ }
+
+ return 0;
+}
+
+
static void taosSetClientLogCfg(SConfig *pCfg) {
SConfigItem *pItem = cfgGetItem(pCfg, "logDir");
tstrncpy(tsLogDir, cfgGetItem(pCfg, "logDir")->str, PATH_MAX);
@@ -981,7 +1127,7 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi
taosSetServerLogCfg(pCfg);
}
- taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32);
+ taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32, false);
if (taosMulMkDir(tsLogDir) != 0) {
uError("failed to create dir:%s since %s", tsLogDir, terrstr());
@@ -1048,6 +1194,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile
if (taosSetClientCfg(tsCfg)) return -1;
} else {
if (taosSetClientCfg(tsCfg)) return -1;
+ if (taosUpdateServerCfg(tsCfg)) return -1;
if (taosSetServerCfg(tsCfg)) return -1;
if (taosSetTfsCfg(tsCfg) != 0) return -1;
}
@@ -1072,7 +1219,7 @@ void taosCleanupCfg() {
void taosCfgDynamicOptions(const char *option, const char *value) {
if (strncasecmp(option, "debugFlag", 9) == 0) {
int32_t flag = atoi(value);
- taosSetAllDebugFlag(flag);
+ taosSetAllDebugFlag(flag, true);
return;
}
@@ -1097,11 +1244,13 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
"dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag",
"tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag",
"tmrDebugFlag", "uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag",
+ "jniDebugFlag",
};
int32_t *optionVars[] = {
&dDebugFlag, &vDebugFlag, &mDebugFlag, &wDebugFlag, &sDebugFlag, &tsdbDebugFlag,
&tqDebugFlag, &fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tdbDebugFlag,
&tmrDebugFlag, &uDebugFlag, &smaDebugFlag, &rpcDebugFlag, &qDebugFlag, &metaDebugFlag,
+ &jniDebugFlag,
};
int32_t optionSize = tListLen(options);
@@ -1113,41 +1262,42 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
int32_t flag = atoi(value);
uInfo("%s set from %d to %d", optName, *optionVars[d], flag);
*optionVars[d] = flag;
- taosSetDebugFlag(optionVars[d], optName, flag);
+ taosSetDebugFlag(optionVars[d], optName, flag, true);
return;
}
uError("failed to cfg dynamic option:%s value:%s", option, value);
}
-void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) {
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite) {
SConfigItem *pItem = cfgGetItem(tsCfg, flagName);
- if (pItem != NULL) {
+ if (pItem != NULL && (rewrite || pItem->i32 == 0)) {
pItem->i32 = flagVal;
}
*pFlagPtr = flagVal;
}
-void taosSetAllDebugFlag(int32_t flag) {
+void taosSetAllDebugFlag(int32_t flag, bool rewrite) {
if (flag <= 0) return;
- taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag);
- taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag);
- taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag);
- taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag);
- taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag);
- taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag);
- taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag);
- taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag);
- taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag);
- taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag);
- taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag);
- taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag);
- taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag);
- taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag);
- taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag);
- taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag);
- taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag);
- taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag);
+ taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite);
+  taosSetDebugFlag(&tmrDebugFlag, "tmrDebugFlag", flag, rewrite);
uInfo("all debug flag are set to %d", flag);
}
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index 8dc4931573..2fc93cc9b5 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -3330,7 +3330,7 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
return 0;
}
-void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp*)pRsp)->pSchemas); }
+void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas); }
void tFreeSTableIndexRsp(void *info) {
if (NULL == info) {
@@ -5119,17 +5119,17 @@ int tDecodeSVCreateTbRsp(SDecoder *pCoder, SVCreateTbRsp *pRsp) {
} else {
pRsp->pMeta = NULL;
}
-
+
tEndDecode(pCoder);
return 0;
}
-void tFreeSVCreateTbRsp(void* param) {
+void tFreeSVCreateTbRsp(void *param) {
if (NULL == param) {
return;
}
-
- SVCreateTbRsp* pRsp = (SVCreateTbRsp*)param;
+
+ SVCreateTbRsp *pRsp = (SVCreateTbRsp *)param;
if (pRsp->pMeta) {
taosMemoryFree(pRsp->pMeta->pSchemas);
taosMemoryFree(pRsp->pMeta);
@@ -5345,7 +5345,7 @@ static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
if (tDecodeI32v(pDecoder, &pBlock->numOfRows) < 0) return -1;
if (tDecodeI32v(pDecoder, &pBlock->affectedRows) < 0) return -1;
if (tDecodeI64v(pDecoder, &pBlock->sver) < 0) return -1;
-
+
int32_t meta = 0;
if (tDecodeI32(pDecoder, &meta) < 0) return -1;
if (meta) {
@@ -5393,12 +5393,12 @@ int32_t tDecodeSSubmitRsp(SDecoder *pDecoder, SSubmitRsp *pRsp) {
return 0;
}
-void tFreeSSubmitBlkRsp(void* param) {
+void tFreeSSubmitBlkRsp(void *param) {
if (NULL == param) {
return;
}
-
- SSubmitBlkRsp* pRsp = (SSubmitBlkRsp*)param;
+
+ SSubmitBlkRsp *pRsp = (SSubmitBlkRsp *)param;
taosMemoryFree(pRsp->tblFName);
if (pRsp->pMeta) {
@@ -5407,7 +5407,6 @@ void tFreeSSubmitBlkRsp(void* param) {
}
}
-
void tFreeSSubmitRsp(SSubmitRsp *pRsp) {
if (NULL == pRsp) return;
@@ -5619,7 +5618,6 @@ void tFreeSMAlterStbRsp(SMAlterStbRsp *pRsp) {
}
}
-
int32_t tEncodeSMCreateStbRsp(SEncoder *pEncoder, const SMCreateStbRsp *pRsp) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->pMeta->pSchemas ? 1 : 0) < 0) return -1;
@@ -5671,8 +5669,6 @@ void tFreeSMCreateStbRsp(SMCreateStbRsp *pRsp) {
}
}
-
-
int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal) {
if (tEncodeI8(pEncoder, pOffsetVal->type) < 0) return -1;
if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
@@ -5690,7 +5686,7 @@ int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal)
int32_t tDecodeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffsetVal) {
if (tDecodeI8(pDecoder, &pOffsetVal->type) < 0) return -1;
- if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
+ if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
if (tDecodeI64(pDecoder, &pOffsetVal->uid) < 0) return -1;
if (tDecodeI64(pDecoder, &pOffsetVal->ts) < 0) return -1;
} else if (pOffsetVal->type == TMQ_OFFSET__LOG) {
@@ -5712,7 +5708,7 @@ int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) {
snprintf(buf, maxLen, "offset(reset to latest)");
} else if (pVal->type == TMQ_OFFSET__LOG) {
snprintf(buf, maxLen, "offset(log) ver:%" PRId64, pVal->version);
- } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
+ } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
snprintf(buf, maxLen, "offset(ss data) uid:%" PRId64 ", ts:%" PRId64, pVal->uid, pVal->ts);
} else {
ASSERT(0);
@@ -5813,17 +5809,17 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) {
return 0;
}
-int32_t tEncodeSMqMetaRsp(SEncoder* pEncoder, const SMqMetaRsp* pRsp) {
+int32_t tEncodeSMqMetaRsp(SEncoder *pEncoder, const SMqMetaRsp *pRsp) {
if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
- if(tEncodeI16(pEncoder, pRsp->resMsgType)) return -1;
- if(tEncodeBinary(pEncoder, pRsp->metaRsp, pRsp->metaRspLen)) return -1;
+ if (tEncodeI16(pEncoder, pRsp->resMsgType)) return -1;
+ if (tEncodeBinary(pEncoder, pRsp->metaRsp, pRsp->metaRspLen)) return -1;
return 0;
}
-int32_t tDecodeSMqMetaRsp(SDecoder* pDecoder, SMqMetaRsp* pRsp) {
+int32_t tDecodeSMqMetaRsp(SDecoder *pDecoder, SMqMetaRsp *pRsp) {
if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
if (tDecodeI16(pDecoder, &pRsp->resMsgType) < 0) return -1;
- if (tDecodeBinaryAlloc(pDecoder, &pRsp->metaRsp, (uint64_t*)&pRsp->metaRspLen) < 0) return -1;
+ if (tDecodeBinaryAlloc(pDecoder, &pRsp->metaRsp, (uint64_t *)&pRsp->metaRspLen) < 0) return -1;
return 0;
}
@@ -5893,6 +5889,92 @@ int32_t tDecodeSMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) {
return 0;
}
+int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) {
+ if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1;
+ if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
+ if (tEncodeI32(pEncoder, pRsp->blockNum) < 0) return -1;
+ if (pRsp->blockNum != 0) {
+ if (tEncodeI8(pEncoder, pRsp->withTbName) < 0) return -1;
+ if (tEncodeI8(pEncoder, pRsp->withSchema) < 0) return -1;
+
+ for (int32_t i = 0; i < pRsp->blockNum; i++) {
+ int32_t bLen = *(int32_t *)taosArrayGet(pRsp->blockDataLen, i);
+ void *data = taosArrayGetP(pRsp->blockData, i);
+ if (tEncodeBinary(pEncoder, (const uint8_t *)data, bLen) < 0) return -1;
+ if (pRsp->withSchema) {
+ SSchemaWrapper *pSW = (SSchemaWrapper *)taosArrayGetP(pRsp->blockSchema, i);
+ if (tEncodeSSchemaWrapper(pEncoder, pSW) < 0) return -1;
+ }
+ if (pRsp->withTbName) {
+ char *tbName = (char *)taosArrayGetP(pRsp->blockTbName, i);
+ if (tEncodeCStr(pEncoder, tbName) < 0) return -1;
+ }
+ }
+ }
+ if (tEncodeI32(pEncoder, pRsp->createTableNum) < 0) return -1;
+ if (pRsp->createTableNum) {
+ for (int32_t i = 0; i < pRsp->createTableNum; i++) {
+ void *createTableReq = taosArrayGetP(pRsp->createTableReq, i);
+ int32_t createTableLen = *(int32_t *)taosArrayGet(pRsp->createTableLen, i);
+ if (tEncodeBinary(pEncoder, createTableReq, createTableLen) < 0) return -1;
+ }
+ }
+ return 0;
+}
+
+int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, STaosxRsp *pRsp) {
+ if (tDecodeSTqOffsetVal(pDecoder, &pRsp->reqOffset) < 0) return -1;
+ if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pRsp->blockNum) < 0) return -1;
+ if (pRsp->blockNum != 0) {
+ pRsp->blockData = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ pRsp->blockDataLen = taosArrayInit(pRsp->blockNum, sizeof(int32_t));
+ if (tDecodeI8(pDecoder, &pRsp->withTbName) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pRsp->withSchema) < 0) return -1;
+ if (pRsp->withTbName) {
+ pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ }
+ if (pRsp->withSchema) {
+ pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ }
+
+ for (int32_t i = 0; i < pRsp->blockNum; i++) {
+ void *data;
+ uint64_t bLen;
+ if (tDecodeBinaryAlloc(pDecoder, &data, &bLen) < 0) return -1;
+ taosArrayPush(pRsp->blockData, &data);
+ int32_t len = bLen;
+ taosArrayPush(pRsp->blockDataLen, &len);
+
+ if (pRsp->withSchema) {
+ SSchemaWrapper *pSW = (SSchemaWrapper *)taosMemoryCalloc(1, sizeof(SSchemaWrapper));
+ if (pSW == NULL) return -1;
+ if (tDecodeSSchemaWrapper(pDecoder, pSW) < 0) return -1;
+ taosArrayPush(pRsp->blockSchema, &pSW);
+ }
+
+ if (pRsp->withTbName) {
+ char *tbName;
+ if (tDecodeCStrAlloc(pDecoder, &tbName) < 0) return -1;
+ taosArrayPush(pRsp->blockTbName, &tbName);
+ }
+ }
+ }
+ if (tDecodeI32(pDecoder, &pRsp->createTableNum) < 0) return -1;
+ if (pRsp->createTableNum) {
+ pRsp->createTableLen = taosArrayInit(pRsp->createTableNum, sizeof(int32_t));
+ pRsp->createTableReq = taosArrayInit(pRsp->createTableNum, sizeof(void *));
+ for (int32_t i = 0; i < pRsp->createTableNum; i++) {
+ void *pCreate = NULL;
+ uint64_t len;
+ if (tDecodeBinaryAlloc(pDecoder, &pCreate, &len) < 0) return -1;
+ int32_t l = (int32_t)len;
+ taosArrayPush(pRsp->createTableLen, &l);
+ taosArrayPush(pRsp->createTableReq, &pCreate);
+ }
+ }
+ return 0;
+}
int32_t tEncodeSSingleDeleteReq(SEncoder *pEncoder, const SSingleDeleteReq *pReq) {
if (tEncodeI64(pEncoder, pReq->uid) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->ts) < 0) return -1;
diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c
index 10e520d9ec..1452c5ae2f 100644
--- a/source/dnode/mnode/impl/src/mndSubscribe.c
+++ b/source/dnode/mnode/impl/src/mndSubscribe.c
@@ -287,6 +287,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
if (consumerVgNum > minVgCnt) {
if (imbCnt < imbConsumerNum) {
if (consumerVgNum == minVgCnt + 1) {
+ imbCnt++;
continue;
} else {
// pop until equal minVg + 1
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index 3a3cbe72ba..5d4285b7c2 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -157,17 +157,17 @@ void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity);
size_t tsdbCacheGetCapacity(SVnode *pVnode);
// tq
-typedef struct SMetaTableInfo{
+typedef struct SMetaTableInfo {
int64_t suid;
int64_t uid;
SSchemaWrapper *schema;
char tbName[TSDB_TABLE_NAME_LEN];
-}SMetaTableInfo;
+} SMetaTableInfo;
-typedef struct SIdInfo{
- int64_t version;
- int32_t index;
-}SIdInfo;
+typedef struct SIdInfo {
+ int64_t version;
+ int32_t index;
+} SIdInfo;
typedef struct SSnapContext {
SMeta *pMeta;
@@ -180,8 +180,8 @@ typedef struct SSnapContext {
SArray *idList;
int32_t index;
bool withMeta;
- bool queryMetaOrData; // true-get meta, false-get data
-}SSnapContext;
+ bool queryMetaOrData; // true-get meta, false-get data
+} SSnapContext;
typedef struct STqReader {
int64_t ver;
@@ -232,11 +232,12 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWr
int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *pSnapshot);
int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData);
-int32_t buildSnapContext(SMeta* pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta, SSnapContext** ctxRet);
-int32_t getMetafromSnapShot(SSnapContext* ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid);
-SMetaTableInfo getUidfromSnapShot(SSnapContext* ctx);
-int32_t setForSnapShot(SSnapContext* ctx, int64_t uid);
-int32_t destroySnapContext(SSnapContext* ctx);
+int32_t buildSnapContext(SMeta *pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta,
+ SSnapContext **ctxRet);
+int32_t getMetafromSnapShot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid);
+SMetaTableInfo getUidfromSnapShot(SSnapContext *ctx);
+int32_t setForSnapShot(SSnapContext *ctx, int64_t uid);
+int32_t destroySnapContext(SSnapContext *ctx);
// structs
struct STsdbCfg {
@@ -259,6 +260,7 @@ typedef struct {
int64_t numOfNTables;
int64_t numOfNTimeSeries;
int64_t numOfTimeSeries;
+ int64_t itvTimeSeries;
int64_t pointsWritten;
int64_t totalStorage;
int64_t compStorage;
diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h
index abfffc045f..c29c4cb6c4 100644
--- a/source/dnode/vnode/src/inc/sma.h
+++ b/source/dnode/vnode/src/inc/sma.h
@@ -95,6 +95,7 @@ struct SRSmaStat {
int64_t refId; // shared by fetch tasks
volatile int64_t nBufItems; // number of items in queue buffer
SRWLatch lock; // r/w lock for rsma fs(e.g. qtaskinfo)
+ volatile int32_t nFetchAll; // active number of fetch all
int8_t triggerStat; // shared by fetch tasks
int8_t commitStat; // 0 not in committing, 1 in committing
SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w)
diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h
index 7c394c4baf..a97c8ff132 100644
--- a/source/dnode/vnode/src/inc/tq.h
+++ b/source/dnode/vnode/src/inc/tq.h
@@ -88,7 +88,7 @@ typedef struct {
STqExecTb execTb;
STqExecDb execDb;
};
-// int32_t numOfCols; // number of out pout column, temporarily used
+ int32_t numOfCols; // number of out pout column, temporarily used
SSchemaWrapper* pSchemaWrapper; // columns that are involved in query
} STqExecHandle;
diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c
index 9d3b4d82eb..7df355a59b 100644
--- a/source/dnode/vnode/src/meta/metaQuery.c
+++ b/source/dnode/vnode/src/meta/metaQuery.c
@@ -615,9 +615,13 @@ int64_t metaGetTbNum(SMeta *pMeta) {
// N.B. Called by statusReq per second
int64_t metaGetTimeSeriesNum(SMeta *pMeta) {
// sum of (number of columns of stable - 1) * number of ctables (excluding timestamp column)
- int64_t num = 0;
- vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
- pMeta->pVnode->config.vndStats.numOfTimeSeries = num;
+ if (pMeta->pVnode->config.vndStats.numOfTimeSeries <= 0 || ++pMeta->pVnode->config.vndStats.itvTimeSeries % 60 == 0) {
+ int64_t num = 0;
+ vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
+ pMeta->pVnode->config.vndStats.numOfTimeSeries = num;
+
+ pMeta->pVnode->config.vndStats.itvTimeSeries = 0;
+ }
return pMeta->pVnode->config.vndStats.numOfTimeSeries + pMeta->pVnode->config.vndStats.numOfNTimeSeries;
}
@@ -890,7 +894,7 @@ const void *metaGetTableTagVal(void *pTag, int16_t type, STagVal *val) {
#ifdef TAG_FILTER_DEBUG
if (IS_VAR_DATA_TYPE(val->type)) {
- char* buf = taosMemoryCalloc(val->nData + 1, 1);
+ char *buf = taosMemoryCalloc(val->nData + 1, 1);
memcpy(buf, val->pData, val->nData);
metaDebug("metaTag table val varchar index:%d cid:%d type:%d value:%s", 1, val->cid, val->type, buf);
taosMemoryFree(buf);
@@ -900,13 +904,13 @@ const void *metaGetTableTagVal(void *pTag, int16_t type, STagVal *val) {
metaDebug("metaTag table val number index:%d cid:%d type:%d value:%f", 1, val->cid, val->type, dval);
}
- SArray* pTagVals = NULL;
- tTagToValArray((STag*)pTag, &pTagVals);
+ SArray *pTagVals = NULL;
+ tTagToValArray((STag *)pTag, &pTagVals);
for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
- STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, i);
+ STagVal *pTagVal = (STagVal *)taosArrayGet(pTagVals, i);
if (IS_VAR_DATA_TYPE(pTagVal->type)) {
- char* buf = taosMemoryCalloc(pTagVal->nData + 1, 1);
+ char *buf = taosMemoryCalloc(pTagVal->nData + 1, 1);
memcpy(buf, pTagVal->pData, pTagVal->nData);
metaDebug("metaTag table varchar index:%d cid:%d type:%d value:%s", i, pTagVal->cid, pTagVal->type, buf);
taosMemoryFree(buf);
diff --git a/source/dnode/vnode/src/sma/smaCommit.c b/source/dnode/vnode/src/sma/smaCommit.c
index ca5367f397..3cf50a035a 100644
--- a/source/dnode/vnode/src/sma/smaCommit.c
+++ b/source/dnode/vnode/src/sma/smaCommit.c
@@ -172,7 +172,7 @@ static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) {
TdDirPtr pDir = NULL;
TdDirEntryPtr pDirEntry = NULL;
char dir[TSDB_FILENAME_LEN];
- const char *pattern = "v[0-9]+qtaskinfo\\.ver([0-9]+)?$";
+ const char *pattern = "v[0-9]+qinf\\.v([0-9]+)?$";
regex_t regex;
int code = 0;
@@ -312,15 +312,22 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
SSmaStat *pStat = SMA_ENV_STAT(pEnv);
SRSmaStat *pRSmaStat = SMA_STAT_RSMA(pStat);
+ int32_t nLoops = 0;
// step 1: set rsma stat
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
- atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 1);
+ while (atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 1) != 0) {
+ ++nLoops;
+ if (nLoops > 1000) {
+ sched_yield();
+ nLoops = 0;
+ }
+ }
pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
ASSERT(pRSmaStat->commitAppliedVer > 0);
// step 2: wait for all triggered fetch tasks to finish
- int32_t nLoops = 0;
+ nLoops = 0;
while (1) {
if (T_REF_VAL_GET(pStat) == 0) {
smaDebug("vgId:%d, rsma commit, fetch tasks are all finished", SMA_VID(pSma));
@@ -344,7 +351,8 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
return TSDB_CODE_FAILED;
}
- smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma), (void*)taosGetSelfPthreadId());
+ smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma),
+ (void *)taosGetSelfPthreadId());
nLoops = 0;
while (atomic_load_64(&pRSmaStat->nBufItems) > 0) {
++nLoops;
@@ -359,7 +367,7 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
}
smaInfo("vgId:%d, rsma commit, operator state commited, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
-#if 0 // consuming task of qTaskInfo clone
+#if 0 // consuming task of qTaskInfo clone
// step 4: swap queue/qall and iQueue/iQall
// lock
// taosWLockLatch(SMA_ENV_LOCK(pEnv));
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
index 426ab521fd..af41c53956 100644
--- a/source/dnode/vnode/src/sma/smaRollup.c
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -21,17 +21,17 @@
#define RSMA_SUBMIT_BATCH_SIZE (1024) // cnt
#define RSMA_FETCH_DELAY_MAX (900000) // ms
#define RSMA_FETCH_ACTIVE_MAX (1800) // ms
+#define RSMA_FETCH_INTERVAL (5000) // ms
SSmaMgmt smaMgmt = {
.inited = 0,
.rsetId = -1,
};
-#define TD_QTASKINFO_FNAME_PREFIX "qtaskinfo.ver"
-#define TD_RSMAINFO_DEL_FILE "rsmainfo.del"
+#define TD_QTASKINFO_FNAME_PREFIX "qinf.v"
+
typedef struct SRSmaQTaskInfoItem SRSmaQTaskInfoItem;
typedef struct SRSmaQTaskInfoIter SRSmaQTaskInfoIter;
-typedef struct SRSmaExecQItem SRSmaExecQItem;
static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid);
static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids);
@@ -82,11 +82,6 @@ struct SRSmaQTaskInfoIter {
int32_t nBufPos;
};
-struct SRSmaExecQItem {
- void *pRSmaInfo;
- void *qall;
-};
-
void tdRSmaQTaskInfoGetFileName(int32_t vgId, int64_t version, char *outputName) {
tdGetVndFileName(vgId, NULL, VNODE_RSMA_DIR, TD_QTASKINFO_FNAME_PREFIX, version, outputName);
}
@@ -1501,13 +1496,13 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
switch (rsmaTriggerStat) {
case TASK_TRIGGER_STAT_PAUSED:
case TASK_TRIGGER_STAT_CANCELLED: {
- tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
smaDebug("vgId:%d, rsma fetch task not start for level %" PRIi8 " since stat is %" PRIi8
", rsetId rsetId:%" PRIi64 " refId:%d",
SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaInfo->refId);
if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) {
- taosTmrReset(tdRSmaFetchTrigger, 5000, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
+ taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
}
+ tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
return;
}
default:
@@ -1518,7 +1513,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
atomic_val_compare_exchange_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE, TASK_TRIGGER_STAT_INACTIVE);
switch (fetchTriggerStat) {
case TASK_TRIGGER_STAT_ACTIVE: {
- smaDebug("vgId:%d, rsma fetch task started for level:%" PRIi8 " suid:%" PRIi64 " since stat is active",
+ smaDebug("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64 " since stat is active",
SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
// async process
pItem->fetchLevel = pItem->level;
@@ -1531,8 +1526,6 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
if (atomic_load_8(&pRSmaInfo->assigned) == 0) {
tsem_post(&(pStat->notEmpty));
}
- smaInfo("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64, SMA_VID(pSma), pItem->level,
- pRSmaInfo->suid);
} break;
case TASK_TRIGGER_STAT_PAUSED: {
smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is paused",
@@ -1715,15 +1708,30 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
smaDebug("vgId:%d, batchSize:%d, execType:%" PRIi8, SMA_VID(pSma), qallItemSize, type);
}
- if (type == RSMA_EXEC_OVERFLOW) {
+ int8_t oldStat = atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 2);
+ if (oldStat == 0 ||
+ ((oldStat == 2) && atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat)) < TASK_TRIGGER_STAT_PAUSED)) {
+ atomic_fetch_add_32(&pRSmaStat->nFetchAll, 1);
tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
+ if (0 == atomic_sub_fetch_32(&pRSmaStat->nFetchAll, 1)) {
+ atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 0);
+ }
}
if (qallItemSize > 0) {
atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize);
continue;
} else if (RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) {
- continue;
+ if (atomic_load_8(RSMA_COMMIT_STAT(pRSmaStat)) == 0) {
+ continue;
+ }
+ for (int32_t j = 0; j < TSDB_RETENTION_L2; ++j) {
+ SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, j);
+ if (pItem->fetchLevel) {
+ pItem->fetchLevel = 0;
+ taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
+ }
+ }
}
break;
@@ -1775,7 +1783,7 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
if (pEnv->flag & SMA_ENV_FLG_CLOSE) {
break;
}
-
+
tsem_wait(&pRSmaStat->notEmpty);
if ((pEnv->flag & SMA_ENV_FLG_CLOSE) && (atomic_load_64(&pRSmaStat->nBufItems) <= 0)) {
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index 26db68a1d4..54f764c6b3 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -596,7 +596,7 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
req.qmsg = NULL;
pHandle->execHandle.task =
- qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, NULL,
+ qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols,
&pHandle->execHandle.pSchemaWrapper);
ASSERT(pHandle->execHandle.task);
void* scanner = NULL;
diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c
index e21125f3a4..a0b8141cfb 100644
--- a/source/dnode/vnode/src/tq/tqExec.c
+++ b/source/dnode/vnode/src/tq/tqExec.c
@@ -110,7 +110,12 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp*
taosArrayPush(pRsp->blockSchema, &pSW);
}
}
- tqAddBlockDataToRsp(pDataBlock, pRsp, taosArrayGetSize(pDataBlock->pDataBlock));
+
+  if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+    tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
+  } else {
+    tqAddBlockDataToRsp(pDataBlock, pRsp, taosArrayGetSize(pDataBlock->pDataBlock));
+  }
pRsp->blockNum++;
if (pOffset->type == TMQ_OFFSET__LOG) {
continue;
diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c
index 6b6717ff57..a192d1f863 100644
--- a/source/dnode/vnode/src/tq/tqMeta.c
+++ b/source/dnode/vnode/src/tq/tqMeta.c
@@ -260,7 +260,7 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
handle.execHandle.task = qCreateQueueExecTaskInfo(
- handle.execHandle.execCol.qmsg, &reader, NULL, &handle.execHandle.pSchemaWrapper);
+ handle.execHandle.execCol.qmsg, &reader, &handle.execHandle.numOfCols, &handle.execHandle.pSchemaWrapper);
ASSERT(handle.execHandle.task);
void* scanner = NULL;
qExtractStreamScanner(handle.execHandle.task, &scanner);
diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c
index 7d259fe06c..18d839e109 100644
--- a/source/libs/command/src/command.c
+++ b/source/libs/command/src/command.c
@@ -471,6 +471,7 @@ static int32_t setCreateTBResultIntoDataBlock(SSDataBlock* pBlock, SDbCfgInfo* p
len += sprintf(buf2 + VARSTR_HEADER_SIZE, "CREATE TABLE `%s` (", tbName);
appendColumnFields(buf2, &len, pCfg);
len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, ")");
+ appendTableOptions(buf2, &len, pDbCfg, pCfg);
}
varDataLen(buf2) = len;
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index 7eb02308de..f0518a72ab 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -303,6 +303,7 @@ typedef struct SAggSupporter {
char* keyBuf; // window key buffer
SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ int32_t currentPageId; // current write page id
} SAggSupporter;
typedef struct {
@@ -327,7 +328,6 @@ typedef struct STableScanInfo {
SQueryTableDataCond cond;
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
int32_t dataBlockLoadFlag;
-// SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded.
SSampleExecInfo sample; // sample execution info
int32_t currentGroupId;
int32_t currentTable;
@@ -431,6 +431,7 @@ typedef struct SStreamAggSupporter {
char* pKeyBuf; // window key buffer
SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ int32_t currentPageId; // buffer page that is active
SSDataBlock* pScanBlock;
} SStreamAggSupporter;
@@ -1009,7 +1010,7 @@ int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimary
int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput,
int32_t size);
-SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize);
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize);
SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex);
SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index 3b3ef9e3de..80c1494f8d 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -46,8 +46,8 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
rowSize += pCtx[i].resDataInfo.interBufSize;
}
- rowSize +=
- (numOfOutput * sizeof(bool)); // expand rowSize to mark if col is null for top/bottom result(doSaveTupleData)
+ rowSize += (numOfOutput * sizeof(bool));
+ // expand rowSize to mark if col is null for top/bottom result(saveTupleData)
return rowSize;
}
@@ -1178,7 +1178,6 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
SqlFunctionCtx* pCtx = &pFuncCtx[i];
pCtx->functionId = -1;
- pCtx->curBufPage = -1;
pCtx->pExpr = pExpr;
if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) {
@@ -1222,6 +1221,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
pCtx->isStream = false;
pCtx->param = pFunct->pParam;
+ pCtx->saveHandle.currentPage = -1;
}
for (int32_t i = 1; i < numOfOutput; ++i) {
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index 271a65647d..124f4b44b0 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -177,13 +177,13 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* n
// extract the number of output columns
SDataBlockDescNode* pDescNode = pPlan->pNode->pOutputDataBlockDesc;
- if(numOfCols) *numOfCols = 0;
+ *numOfCols = 0;
SNode* pNode;
FOREACH(pNode, pDescNode->pSlots) {
SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode;
if (pSlotDesc->output) {
- if(numOfCols) ++(*numOfCols);
+ ++(*numOfCols);
}
}
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index e79a9fa16e..4ffa80d468 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -179,26 +179,23 @@ static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pR
}
#endif
-SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize) {
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize) {
SFilePage* pData = NULL;
// in the first scan, new space needed for results
int32_t pageId = -1;
- SIDList list = getDataBufPagesIdList(pResultBuf);
-
- if (taosArrayGetSize(list) == 0) {
- pData = getNewBufPage(pResultBuf, tableGroupId, &pageId);
+ if (*currentPageId == -1) {
+ pData = getNewBufPage(pResultBuf, &pageId);
pData->num = sizeof(SFilePage);
} else {
- SPageInfo* pi = getLastPageInfo(list);
- pData = getBufPage(pResultBuf, getPageId(pi));
- pageId = getPageId(pi);
+ pData = getBufPage(pResultBuf, *currentPageId);
+ pageId = *currentPageId;
if (pData->num + interBufSize > getBufPageSize(pResultBuf)) {
// release current page first, and prepare the next one
- releaseBufPageInfo(pResultBuf, pi);
+ releaseBufPage(pResultBuf, pData);
- pData = getNewBufPage(pResultBuf, tableGroupId, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
if (pData != NULL) {
pData->num = sizeof(SFilePage);
}
@@ -215,9 +212,9 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int
SResultRow* pResultRow = (SResultRow*)((char*)pData + pData->num);
pResultRow->pageId = pageId;
pResultRow->offset = (int32_t)pData->num;
+ *currentPageId = pageId;
pData->num += interBufSize;
-
return pResultRow;
}
@@ -263,11 +260,8 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// allocate a new buffer page
if (pResult == NULL) {
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_2");
-#endif
ASSERT(pSup->resultRowSize > 0);
- pResult = getNewResultRow(pResultBuf, groupId, pSup->resultRowSize);
+ pResult = getNewResultRow(pResultBuf, &pSup->currentPageId, pSup->resultRowSize);
initResultRow(pResult);
@@ -302,7 +296,7 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes
SIDList list = getDataBufPagesIdList(pResultBuf);
if (taosArrayGetSize(list) == 0) {
- pData = getNewBufPage(pResultBuf, tid, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
pData->num = sizeof(SFilePage);
} else {
SPageInfo* pi = getLastPageInfo(list);
@@ -313,7 +307,7 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes
// release current page first, and prepare the next one
releaseBufPageInfo(pResultBuf, pi);
- pData = getNewBufPage(pResultBuf, tid, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
if (pData != NULL) {
pData->num = sizeof(SFilePage);
}
@@ -3092,7 +3086,7 @@ int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) {
offset += sizeof(int32_t);
uint64_t tableGroupId = *(uint64_t*)(result + offset);
- SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, tableGroupId, pSup->resultRowSize);
+ SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, &pSup->currentPageId, pSup->resultRowSize);
if (!resultRow) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
@@ -3440,8 +3434,10 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul
int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize,
const char* pKey) {
+ int32_t code = 0;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ pAggSup->currentPageId = -1;
pAggSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t));
pAggSup->pResultRowHashTable = tSimpleHashInit(10, hashFn);
@@ -3455,18 +3451,18 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);
if (!osTempSpaceAvailable()) {
- terrno = TSDB_CODE_NO_AVAIL_DISK;
- qError("Init stream agg supporter failed since %s", terrstr(terrno));
- return terrno;
- }
-
- int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
- if (code != TSDB_CODE_SUCCESS) {
- qError("Create agg result buf failed since %s", tstrerror(code));
+ code = TSDB_CODE_NO_AVAIL_DISK;
+    qError("Init stream agg supporter failed since %s, %s", tstrerror(code), pKey);
return code;
}
- return TSDB_CODE_SUCCESS;
+ code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
+ if (code != TSDB_CODE_SUCCESS) {
+ qError("Create agg result buf failed since %s, %s", tstrerror(code), pKey);
+ return code;
+ }
+
+ return code;
}
void cleanupAggSup(SAggSupporter* pAggSup) {
@@ -3488,7 +3484,7 @@ int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInf
}
for (int32_t i = 0; i < numOfCols; ++i) {
- pSup->pCtx[i].pBuf = pAggSup->pResultBuf;
+ pSup->pCtx[i].saveHandle.pBuf = pAggSup->pResultBuf;
}
return TSDB_CODE_SUCCESS;
@@ -3520,6 +3516,7 @@ void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
}
taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx);
+ taosMemoryFreeClear(pCtx[i].subsidiaries.buf);
taosMemoryFree(pCtx[i].input.pData);
taosMemoryFree(pCtx[i].input.pColumnDataAgg);
}
@@ -4678,6 +4675,7 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInf
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput,
int32_t size) {
+ pSup->currentPageId = -1;
pSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pSup->keySize = sizeof(int64_t) + sizeof(TSKEY);
pSup->pKeyBuf = taosMemoryCalloc(1, pSup->keySize);
@@ -4705,7 +4703,8 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlF
}
int32_t code = createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, tsTempDir);
for (int32_t i = 0; i < numOfOutput; ++i) {
- pCtx[i].pBuf = pSup->pResultBuf;
+ pCtx[i].saveHandle.pBuf = pSup->pResultBuf;
}
+
return code;
}
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index 9d7e833b19..5d123f723e 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -547,7 +547,7 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
p = taosHashGet(pInfo->pGroupSet, pInfo->keyBuf, len);
int32_t pageId = 0;
- pPage = getNewBufPage(pInfo->pBuf, 0, &pageId);
+ pPage = getNewBufPage(pInfo->pBuf, &pageId);
taosArrayPush(p->pPageList, &pageId);
*(int32_t *) pPage = 0;
@@ -562,7 +562,7 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
// add a new page for current group
int32_t pageId = 0;
- pPage = getNewBufPage(pInfo->pBuf, 0, &pageId);
+ pPage = getNewBufPage(pInfo->pBuf, &pageId);
taosArrayPush(p->pPageList, &pageId);
memset(pPage, 0, getBufPageSize(pInfo->pBuf));
}
diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c
index 0661ccd390..2f12a0d19b 100644
--- a/source/libs/executor/src/projectoperator.c
+++ b/source/libs/executor/src/projectoperator.c
@@ -195,16 +195,6 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS
return PROJECT_RETRIEVE_DONE;
}
-void printDataBlock1(SSDataBlock* pBlock, const char* flag) {
- if (!pBlock || pBlock->info.rows == 0) {
- qDebug("===stream===printDataBlock: Block is Null or Empty");
- return;
- }
- char* pBuf = NULL;
- qDebug("%s", dumpBlockData(pBlock, flag, &pBuf));
- taosMemoryFreeClear(pBuf);
-}
-
SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
SProjectOperatorInfo* pProjectInfo = pOperator->info;
SOptrBasicInfo* pInfo = &pProjectInfo->binfo;
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index b97970aeef..152bd5939d 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -1828,12 +1828,6 @@ static bool timeWindowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols, SInt
return needed;
}
-void increaseTs(SqlFunctionCtx* pCtx) {
- if (pCtx[0].pExpr->pExpr->_function.pFunctNode->funcType == FUNCTION_TYPE_WSTART) {
-// pCtx[0].increase = true;
- }
-}
-
void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SAggSupporter* pSup) {
if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
// Todo(liuyao) support partition by column
@@ -1895,7 +1889,6 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
if (isStream) {
ASSERT(numOfCols > 0);
- increaseTs(pSup->pCtx);
initStreamFunciton(pSup->pCtx, pSup->numOfExprs);
}
@@ -3050,6 +3043,7 @@ static void clearStreamIntervalOperator(SStreamFinalIntervalOperatorInfo* pInfo)
tSimpleHashClear(pInfo->aggSup.pResultRowHashTable);
clearDiskbasedBuf(pInfo->aggSup.pResultBuf);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
+ pInfo->aggSup.currentPageId = -1;
}
static void clearSpecialDataBlock(SSDataBlock* pBlock) {
@@ -3420,7 +3414,6 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
initBasicInfo(&pInfo->binfo, pResBlock);
ASSERT(numOfCols > 0);
- increaseTs(pOperator->exprSupp.pCtx);
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
@@ -3451,6 +3444,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
// semi interval operator does not catch result
pInfo->isFinal = false;
pOperator->name = "StreamSemiIntervalOperator";
+ ASSERT(pInfo->aggSup.currentPageId == -1);
}
if (!IS_FINAL_OP(pInfo) || numOfChild == 0) {
@@ -3559,11 +3553,10 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo*
initBasicInfo(pBasicInfo, pResultBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
- pSup->pCtx[i].pBuf = NULL;
+ pSup->pCtx[i].saveHandle.pBuf = NULL;
}
ASSERT(numOfCols > 0);
- increaseTs(pSup->pCtx);
return TSDB_CODE_SUCCESS;
}
@@ -3820,7 +3813,7 @@ static int32_t setWindowOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pRes
}
if (pWinInfo->pos.pageId == -1) {
- *pResult = getNewResultRow(pAggSup->pResultBuf, groupId, pAggSup->resultRowSize);
+ *pResult = getNewResultRow(pAggSup->pResultBuf, &pAggSup->currentPageId, pAggSup->resultRowSize);
if (*pResult == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -4337,6 +4330,7 @@ static void clearStreamSessionOperator(SStreamSessionAggOperatorInfo* pInfo) {
}
}
clearDiskbasedBuf(pInfo->streamAggSup.pResultBuf);
+ pInfo->streamAggSup.currentPageId = -1;
}
static void removeSessionResults(SHashObj* pHashMap, SArray* pWins) {
diff --git a/source/libs/executor/src/tlinearhash.c b/source/libs/executor/src/tlinearhash.c
index e0752840db..cffabcb6ac 100644
--- a/source/libs/executor/src/tlinearhash.c
+++ b/source/libs/executor/src/tlinearhash.c
@@ -97,7 +97,7 @@ static int32_t doAddToBucket(SLHashObj* pHashObj, SLHashBucket* pBucket, int32_t
// allocate the overflow buffer page to hold this k/v.
int32_t newPageId = -1;
- SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, 0, &newPageId);
+ SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, &newPageId);
if (pNewPage == NULL) {
return terrno;
}
@@ -227,7 +227,7 @@ static int32_t doAddNewBucket(SLHashObj* pHashObj) {
}
int32_t pageId = -1;
- SFilePage* p = getNewBufPage(pHashObj->pBuf, 0, &pageId);
+ SFilePage* p = getNewBufPage(pHashObj->pBuf, &pageId);
if (p == NULL) {
return terrno;
}
diff --git a/source/libs/executor/src/tsimplehash.c b/source/libs/executor/src/tsimplehash.c
index 8cd376e092..84b615af7a 100644
--- a/source/libs/executor/src/tsimplehash.c
+++ b/source/libs/executor/src/tsimplehash.c
@@ -295,11 +295,7 @@ int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t ke
}
if (*pIter == (void *)GET_SHASH_NODE_DATA(pNode)) {
- if (!pPrev) {
- *pIter = NULL;
- } else {
- *pIter = GET_SHASH_NODE_DATA(pPrev);
- }
+ *pIter = pPrev ? GET_SHASH_NODE_DATA(pPrev) : NULL;
}
FREE_HASH_NODE(pNode);
diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c
index fc411e850a..168cd21c44 100644
--- a/source/libs/executor/src/tsort.c
+++ b/source/libs/executor/src/tsort.c
@@ -180,7 +180,7 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
}
int32_t pageId = -1;
- void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId);
+ void* pPage = getNewBufPage(pHandle->pBuf, &pageId);
if (pPage == NULL) {
blockDataDestroy(p);
return terrno;
@@ -512,7 +512,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
}
int32_t pageId = -1;
- void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId);
+ void* pPage = getNewBufPage(pHandle->pBuf, &pageId);
if (pPage == NULL) {
return terrno;
}
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index 32d0472a50..b71d06231e 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -1146,8 +1146,9 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
return true;
}
-static void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
-static void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
+static STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock);
+static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
+static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos);
static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, const char* tval) {
// the data is loaded, not only the block SMA value
@@ -1199,7 +1200,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
pBuf->v = *(int64_t*)tval;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
} else {
if (IS_SIGNED_NUMERIC_TYPE(type)) {
@@ -1211,7 +1212,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(int64_t*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
@@ -1224,7 +1225,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(uint64_t*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
} else if (type == TSDB_DATA_TYPE_DOUBLE) {
@@ -1236,7 +1237,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(double*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
} else if (type == TSDB_DATA_TYPE_FLOAT) {
@@ -1250,7 +1251,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
}
@@ -1275,7 +1276,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1287,7 +1288,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1306,7 +1307,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1318,7 +1319,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1337,7 +1338,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1349,7 +1350,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1368,7 +1369,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1380,7 +1381,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1401,7 +1402,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1413,7 +1414,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1432,7 +1433,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1444,7 +1445,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1463,7 +1464,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1475,7 +1476,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1494,7 +1495,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1506,7 +1507,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1526,7 +1527,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1538,7 +1539,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1557,7 +1558,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1569,7 +1570,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1580,7 +1581,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
_min_max_over:
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved ) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos);
+ pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pBuf->nullTupleSaved = true;
}
return numOfElems;
@@ -1599,8 +1600,7 @@ int32_t maxFunction(SqlFunctionCtx* pCtx) {
}
static void setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t rowIndex);
-
-static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rIndex);
+static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rowIndex);
int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
@@ -1648,34 +1648,29 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple
return;
}
- int32_t pageId = pTuplePos->pageId;
- int32_t offset = pTuplePos->offset;
+ if (pCtx->saveHandle.pBuf != NULL) {
+ if (pTuplePos->pageId != -1) {
+ int32_t numOfCols = pCtx->subsidiaries.num;
+ const char* p = loadTupleData(pCtx, pTuplePos);
- if (pTuplePos->pageId != -1) {
- int32_t numOfCols = pCtx->subsidiaries.num;
- SFilePage* pPage = getBufPage(pCtx->pBuf, pageId);
+ bool* nullList = (bool*)p;
+ char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
- bool* nullList = (bool*)((char*)pPage + offset);
- char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
+ // todo set the offset value to optimize the performance.
+ for (int32_t j = 0; j < numOfCols; ++j) {
+ SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
+ int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
- // todo set the offset value to optimize the performance.
- for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
-
- SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
- int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
-
- SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId);
- ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
- if (nullList[j]) {
- colDataAppendNULL(pDstCol, rowIndex);
- } else {
- colDataAppend(pDstCol, rowIndex, pStart, false);
+ SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId);
+ ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
+ if (nullList[j]) {
+ colDataAppendNULL(pDstCol, rowIndex);
+ } else {
+ colDataAppend(pDstCol, rowIndex, pStart, false);
+ }
+ pStart += pDstCol->info.bytes;
}
- pStart += pDstCol->info.bytes;
}
-
- releaseBufPage(pCtx->pBuf, pPage);
}
}
@@ -2756,15 +2751,15 @@ static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowInde
return *(TSKEY*)colDataGetData(pTsColInfo, rowIndex);
}
-static void saveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) {
+static void firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) {
if (pCtx->subsidiaries.num <= 0) {
return;
}
if (!pInfo->hasResult) {
- doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
+ pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock);
} else {
- doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
+ updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
}
}
@@ -2778,7 +2773,7 @@ static void doSaveCurrentVal(SqlFunctionCtx* pCtx, int32_t rowIndex, int64_t cur
memcpy(pInfo->buf, pData, pInfo->bytes);
pInfo->ts = currentTs;
- saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
+ firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
pInfo->hasResult = true;
}
@@ -2982,7 +2977,7 @@ static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, S
pOutput->bytes = pInput->bytes;
memcpy(pOutput->buf, pInput->buf, pOutput->bytes);
- saveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput);
+ firstlastSaveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput);
pOutput->hasResult = true;
}
@@ -3087,7 +3082,7 @@ static void doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex, i
}
pInfo->ts = cts;
- saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
+ firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
pInfo->hasResult = true;
}
@@ -3420,7 +3415,7 @@ int32_t topFunction(SqlFunctionCtx* pCtx) {
}
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos);
+ pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pRes->nullTupleSaved = true;
}
return TSDB_CODE_SUCCESS;
@@ -3448,7 +3443,7 @@ int32_t bottomFunction(SqlFunctionCtx* pCtx) {
}
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos);
+ pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pRes->nullTupleSaved = true;
}
@@ -3500,7 +3495,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
// save the data of this tuple
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
+ pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId,
@@ -3524,7 +3519,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
// save the data of this tuple by over writing the old data
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
+ updateTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_copyTuple pageId:%d, offset:%d", pItem->tuplePos.pageId, pItem->tuplePos.offset);
@@ -3541,38 +3536,13 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
* |(n columns, one bit for each column)| src column #1| src column #2|
* +------------------------------------+--------------+--------------+
*/
-void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
- SFilePage* pPage = NULL;
+void* serializeTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SSubsidiaryResInfo* pSubsidiaryies, char* buf) {
+ char* nullList = buf;
+ char* pStart = (char*)(nullList + sizeof(bool) * pSubsidiaryies->num);
- // todo refactor: move away
- int32_t completeRowSize = pCtx->subsidiaries.num * sizeof(bool);
- for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
- completeRowSize += pc->pExpr->base.resSchema.bytes;
- }
-
- if (pCtx->curBufPage == -1) {
- pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage);
- pPage->num = sizeof(SFilePage);
- } else {
- pPage = getBufPage(pCtx->pBuf, pCtx->curBufPage);
- if (pPage->num + completeRowSize > getBufPageSize(pCtx->pBuf)) {
- // current page is all used, let's prepare a new buffer page
- releaseBufPage(pCtx->pBuf, pPage);
- pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage);
- pPage->num = sizeof(SFilePage);
- }
- }
-
- pPos->pageId = pCtx->curBufPage;
- pPos->offset = pPage->num;
-
- // keep the current row data, extract method
int32_t offset = 0;
- bool* nullList = (bool*)((char*)pPage + pPage->num);
- char* pStart = (char*)(nullList + sizeof(bool) * pCtx->subsidiaries.num);
- for (int32_t i = 0; i < pCtx->subsidiaries.num; ++i) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
+ for (int32_t i = 0; i < pSubsidiaryies->num; ++i) {
+ SqlFunctionCtx* pc = pSubsidiaryies->pCtx[i];
SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
int32_t srcSlotId = pFuncParam->pCol->slotId;
@@ -3593,50 +3563,88 @@ void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock*
offset += pCol->info.bytes;
}
- pPage->num += completeRowSize;
-
- setBufPageDirty(pPage, true);
- releaseBufPage(pCtx->pBuf, pPage);
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_saveTuple pos:%p,pageId:%d, offset:%d\n", pPos, pPos->pageId, pPos->offset);
-#endif
+ return buf;
}
-void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
- SFilePage* pPage = getBufPage(pCtx->pBuf, pPos->pageId);
+static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length) {
+ STuplePos p = {0};
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = NULL;
- int32_t numOfCols = pCtx->subsidiaries.num;
-
- bool* nullList = (bool*)((char*)pPage + pPos->offset);
- char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
-
- int32_t offset = 0;
- for (int32_t i = 0; i < numOfCols; ++i) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
- SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
- int32_t srcSlotId = pFuncParam->pCol->slotId;
-
- SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId);
- if ((nullList[i] = colDataIsNull_s(pCol, rowIndex)) == true) {
- offset += pCol->info.bytes;
- continue;
- }
-
- char* p = colDataGetData(pCol, rowIndex);
- if (IS_VAR_DATA_TYPE(pCol->info.type)) {
- memcpy(pStart + offset, p, (pCol->info.type == TSDB_DATA_TYPE_JSON) ? getJsonValueLen(p) : varDataTLen(p));
+ if (pHandle->currentPage == -1) {
+ pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage);
+ pPage->num = sizeof(SFilePage);
} else {
- memcpy(pStart + offset, p, pCol->info.bytes);
+ pPage = getBufPage(pHandle->pBuf, pHandle->currentPage);
+ if (pPage->num + length > getBufPageSize(pHandle->pBuf)) {
+ // current page is all used, let's prepare a new buffer page
+ releaseBufPage(pHandle->pBuf, pPage);
+ pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage);
+ pPage->num = sizeof(SFilePage);
+ }
}
- offset += pCol->info.bytes;
+ p = (STuplePos) {.pageId = pHandle->currentPage, .offset = pPage->num};
+ memcpy(pPage->data + pPage->num, pBuf, length);
+
+ pPage->num += length;
+ setBufPageDirty(pPage, true);
+ releaseBufPage(pHandle->pBuf, pPage);
+ } else {
+ // other tuple save policy
}
- setBufPageDirty(pPage, true);
- releaseBufPage(pCtx->pBuf, pPage);
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_copyTuple pos:%p, pageId:%d, offset:%d", pPos, pPos->pageId, pPos->offset);
-#endif
+ return p;
+}
+
+STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock) {
+ if (pCtx->subsidiaries.rowLen == 0) {
+ int32_t rowLen = 0;
+ for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
+ SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
+ rowLen += pc->pExpr->base.resSchema.bytes;
+ }
+
+ pCtx->subsidiaries.rowLen = rowLen + pCtx->subsidiaries.num * sizeof(bool);
+ pCtx->subsidiaries.buf = taosMemoryMalloc(pCtx->subsidiaries.rowLen);
+ }
+
+ char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
+ return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen);
+}
+
+static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STuplePos* pPos) {
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
+ memcpy(pPage->data + pPos->offset, pBuf, length);
+ setBufPageDirty(pPage, true);
+ releaseBufPage(pHandle->pBuf, pPage);
+ } else {
+
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
+ char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
+ doUpdateTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pPos);
+ return TSDB_CODE_SUCCESS;
+}
+
+static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPos) {
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
+ char* p = pPage->data + pPos->offset;
+ releaseBufPage(pHandle->pBuf, pPage);
+ return p;
+ } else {
+ return NULL;
+ }
+}
+
+static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos) {
+ return doLoadTupleData(&pCtx->saveHandle, pPos);
}
int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
@@ -3788,8 +3796,6 @@ int32_t spreadFunction(SqlFunctionCtx* pCtx) {
SColumnInfoData* pCol = pInput->pData[0];
int32_t start = pInput->startRowIndex;
- int32_t numOfRows = pInput->numOfRows;
-
// check the valid data one by one
for (int32_t i = start; i < pInput->numOfRows + start; ++i) {
if (colDataIsNull_f(pCol->nullbitmap, i)) {
@@ -4964,7 +4970,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
if (pInfo->numSampled < pInfo->samples) {
sampleAssignResult(pInfo, data, pInfo->numSampled);
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[pInfo->numSampled]);
+ pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
pInfo->numSampled++;
} else {
@@ -4972,7 +4978,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
if (j < pInfo->samples) {
sampleAssignResult(pInfo, data, j);
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]);
+ updateTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]);
}
}
}
@@ -4995,7 +5001,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {
}
if (pInfo->numSampled == 0 && pCtx->subsidiaries.num > 0 && !pInfo->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pInfo->nullTuplePos);
+ pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pInfo->nullTupleSaved = true;
}
diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c
index dbe0b6bb3a..4c58c0abe5 100644
--- a/source/libs/function/src/tpercentile.c
+++ b/source/libs/function/src/tpercentile.c
@@ -372,7 +372,7 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
pPageIdList = pList;
}
- pSlot->info.data = getNewBufPage(pBucket->pBuffer, groupId, &pageId);
+ pSlot->info.data = getNewBufPage(pBucket->pBuffer, &pageId);
pSlot->info.pageId = pageId;
taosArrayPush(pPageIdList, &pageId);
}
diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c
index 56fbafe76d..049d1ef545 100644
--- a/source/libs/parser/src/parInsert.c
+++ b/source/libs/parser/src/parInsert.c
@@ -1669,6 +1669,10 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
pDb = taosHashIterate(context.pDbFNameHashObj, pDb);
}
}
+ if (pContext->pStmtCb) {
+ context.pVgroupsHashObj = NULL;
+ context.pTableBlockHashObj = NULL;
+ }
destroyInsertParseContext(&context);
return code;
}
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index b6b677858d..51a4295ce5 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -5053,7 +5053,7 @@ static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pSt
return TSDB_CODE_SUCCESS;
}
- if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery) ||
+ if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery) || NULL == ((SSelectStmt*)pStmt->pQuery)->pFromTable ||
QUERY_NODE_REAL_TABLE != nodeType(((SSelectStmt*)pStmt->pQuery)->pFromTable)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query");
}
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 7e27132f3c..7ee6a5b223 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -136,8 +136,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
}
static EDealRes rewriteQueryExprAliasImpl(SNode* pNode, void* pContext) {
- if (nodesIsExprNode(pNode) && QUERY_NODE_COLUMN != nodeType(pNode) && '\0' == ((SExprNode*)pNode)->userAlias[0]) {
- strcpy(((SExprNode*)pNode)->userAlias, ((SExprNode*)pNode)->aliasName);
+ if (nodesIsExprNode(pNode) && QUERY_NODE_COLUMN != nodeType(pNode)) {
sprintf(((SExprNode*)pNode)->aliasName, "#%d", *(int32_t*)pContext);
++(*(int32_t*)pContext);
}
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index 0667c5f5b9..bf72f52105 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -197,28 +197,21 @@ static EScanType getScanType(SLogicPlanContext* pCxt, SNodeList* pScanPseudoCols
return SCAN_TYPE_TABLE;
}
-static SNode* createPrimaryKeyCol(uint64_t tableId) {
+static SNode* createFirstCol(uint64_t tableId, const SSchema* pSchema) {
SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
if (NULL == pCol) {
return NULL;
}
- pCol->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP;
- pCol->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes;
+ pCol->node.resType.type = pSchema->type;
+ pCol->node.resType.bytes = pSchema->bytes;
pCol->tableId = tableId;
- pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+ pCol->colId = pSchema->colId;
pCol->colType = COLUMN_TYPE_COLUMN;
- strcpy(pCol->colName, "#primarykey");
+ strcpy(pCol->colName, pSchema->name);
return (SNode*)pCol;
}
-static int32_t addPrimaryKeyCol(uint64_t tableId, SNodeList** pCols) {
- if (NULL == *pCols) {
- *pCols = nodesMakeList();
- if (NULL == *pCols) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- }
-
+static int32_t addPrimaryKeyCol(uint64_t tableId, const SSchema* pSchema, SNodeList** pCols) {
bool found = false;
SNode* pCol = NULL;
FOREACH(pCol, *pCols) {
@@ -229,13 +222,25 @@ static int32_t addPrimaryKeyCol(uint64_t tableId, SNodeList** pCols) {
}
if (!found) {
- if (TSDB_CODE_SUCCESS != nodesListStrictAppend(*pCols, createPrimaryKeyCol(tableId))) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
+ return nodesListMakeStrictAppend(pCols, createFirstCol(tableId, pSchema));
}
return TSDB_CODE_SUCCESS;
}
+static int32_t addSystableFirstCol(uint64_t tableId, const SSchema* pSchema, SNodeList** pCols) {
+ if (LIST_LENGTH(*pCols) > 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+ return nodesListMakeStrictAppend(pCols, createFirstCol(tableId, pSchema));
+}
+
+static int32_t addDefaultScanCol(const STableMeta* pMeta, SNodeList** pCols) {
+ if (TSDB_SYSTEM_TABLE == pMeta->tableType) {
+ return addSystableFirstCol(pMeta->uid, pMeta->schema, pCols);
+ }
+ return addPrimaryKeyCol(pMeta->uid, pMeta->schema, pCols);
+}
+
static int32_t makeScanLogicNode(SLogicPlanContext* pCxt, SRealTableNode* pRealTable, bool hasRepeatScanFuncs,
SLogicNode** pLogicNode) {
SScanLogicNode* pScan = (SScanLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_SCAN);
@@ -299,8 +304,8 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
pScan->hasNormalCols = true;
}
- if (TSDB_CODE_SUCCESS == code && SCAN_TYPE_SYSTEM_TABLE != pScan->scanType) {
- code = addPrimaryKeyCol(pScan->tableId, &pScan->pScanCols);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = addDefaultScanCol(pRealTable->pMeta, &pScan->pScanCols);
}
// set output
@@ -787,10 +792,8 @@ static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSele
static EDealRes needFillValueImpl(SNode* pNode, void* pContext) {
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
SColumnNode* pCol = (SColumnNode*)pNode;
- if (COLUMN_TYPE_WINDOW_START != pCol->colType &&
- COLUMN_TYPE_WINDOW_END != pCol->colType &&
- COLUMN_TYPE_WINDOW_DURATION != pCol->colType &&
- COLUMN_TYPE_GROUP_KEY != pCol->colType) {
+ if (COLUMN_TYPE_WINDOW_START != pCol->colType && COLUMN_TYPE_WINDOW_END != pCol->colType &&
+ COLUMN_TYPE_WINDOW_DURATION != pCol->colType && COLUMN_TYPE_GROUP_KEY != pCol->colType) {
*(bool*)pContext = true;
return DEAL_RES_END;
}
@@ -1008,7 +1011,8 @@ static int32_t createPartitionLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pS
int32_t code =
nodesCollectColumns(pSelect, SQL_CLAUSE_PARTITION_BY, NULL, COLLECT_COL_TYPE_ALL, &pPartition->node.pTargets);
if (TSDB_CODE_SUCCESS == code && NULL == pPartition->node.pTargets) {
- code = nodesListMakeStrictAppend(&pPartition->node.pTargets, nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0)));
+ code = nodesListMakeStrictAppend(&pPartition->node.pTargets,
+ nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0)));
}
if (TSDB_CODE_SUCCESS == code) {
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index 120b33dd13..c2f1d71b18 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -1084,7 +1084,7 @@ static int32_t sortPriKeyOptGetSequencingNodesImpl(SLogicNode* pNode, bool* pNot
switch (nodeType(pNode)) {
case QUERY_NODE_LOGIC_PLAN_SCAN: {
SScanLogicNode* pScan = (SScanLogicNode*)pNode;
- if (NULL != pScan->pGroupTags) {
+ if (NULL != pScan->pGroupTags || TSDB_SYSTEM_TABLE == pScan->tableType) {
*pNotOptimize = true;
return TSDB_CODE_SUCCESS;
}
diff --git a/source/libs/planner/test/planSysTbTest.cpp b/source/libs/planner/test/planSysTbTest.cpp
index 921f86f09a..6b40e381cc 100644
--- a/source/libs/planner/test/planSysTbTest.cpp
+++ b/source/libs/planner/test/planSysTbTest.cpp
@@ -32,3 +32,9 @@ TEST_F(PlanSysTableTest, informationSchema) {
run("SELECT * FROM information_schema.ins_databases WHERE name = 'information_schema'");
}
+
+TEST_F(PlanSysTableTest, withAgg) {
+ useDb("root", "information_schema");
+
+ run("SELECT COUNT(1) FROM ins_users");
+}
diff --git a/source/libs/scalar/inc/filterInt.h b/source/libs/scalar/inc/filterInt.h
index 23693c785a..e7695b2f04 100644
--- a/source/libs/scalar/inc/filterInt.h
+++ b/source/libs/scalar/inc/filterInt.h
@@ -276,7 +276,7 @@ struct SFilterInfo {
#define FILTER_CLR_FLAG(st, f) st &= (~f)
#define SIMPLE_COPY_VALUES(dst, src) *((int64_t *)dst) = *((int64_t *)src)
-#define FILTER_PACKAGE_UNIT_HASH_KEY(v, optr, idx1, idx2) do { char *_t = (char *)v; _t[0] = optr; *(uint32_t *)(_t + 1) = idx1; *(uint32_t *)(_t + 3) = idx2; } while (0)
+#define FLT_PACKAGE_UNIT_HASH_KEY(v, op1, op2, lidx, ridx, ridx2) do { char *_t = (char *)(v); _t[0] = (op1); _t[1] = (op2); *(uint32_t *)(_t + 2) = (lidx); *(uint32_t *)(_t + 2 + sizeof(uint32_t)) = (ridx); } while (0)
#define FILTER_GREATER(cr,sflag,eflag) ((cr > 0) || ((cr == 0) && (FILTER_GET_FLAG(sflag,RANGE_FLG_EXCLUDE) || FILTER_GET_FLAG(eflag,RANGE_FLG_EXCLUDE))))
#define FILTER_COPY_RA(dst, src) do { (dst)->sflag = (src)->sflag; (dst)->eflag = (src)->eflag; (dst)->s = (src)->s; (dst)->e = (src)->e; } while (0)
@@ -350,6 +350,7 @@ struct SFilterInfo {
extern bool filterDoCompare(__compar_fn_t func, uint8_t optr, void *left, void *right);
extern __compar_fn_t filterGetCompFunc(int32_t type, int32_t optr);
+extern __compar_fn_t filterGetCompFuncEx(int32_t lType, int32_t rType, int32_t optr);
#ifdef __cplusplus
}
diff --git a/source/libs/scalar/inc/sclInt.h b/source/libs/scalar/inc/sclInt.h
index 36d2c5a49c..15e9026ddb 100644
--- a/source/libs/scalar/inc/sclInt.h
+++ b/source/libs/scalar/inc/sclInt.h
@@ -47,6 +47,7 @@ typedef struct SScalarCtx {
#define SCL_IS_NULL_VALUE_NODE(_node) ((QUERY_NODE_VALUE == nodeType(_node)) && (TSDB_DATA_TYPE_NULL == ((SValueNode *)_node)->node.resType.type))
#define SCL_IS_COMPARISON_OPERATOR(_opType) ((_opType) >= OP_TYPE_GREATER_THAN && (_opType) < OP_TYPE_IS_NOT_UNKNOWN)
#define SCL_DOWNGRADE_DATETYPE(_type) ((_type) == TSDB_DATA_TYPE_BIGINT || TSDB_DATA_TYPE_DOUBLE == (_type) || (_type) == TSDB_DATA_TYPE_UBIGINT)
+#define SCL_NO_NEED_CONVERT_COMPARISION(_ltype, _rtype, _optr) (IS_NUMERIC_TYPE(_ltype) && IS_NUMERIC_TYPE(_rtype) && ((_optr) >= OP_TYPE_GREATER_THAN && (_optr) <= OP_TYPE_NOT_EQUAL))
#define sclFatal(...) qFatal(__VA_ARGS__)
#define sclError(...) qError(__VA_ARGS__)
diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c
index 4377dbf14e..9e67635437 100644
--- a/source/libs/scalar/src/filter.c
+++ b/source/libs/scalar/src/filter.c
@@ -132,6 +132,77 @@ __compar_fn_t gDataCompare[] = {compareInt32Val, compareInt8Val, compareInt16Val
compareChkNotInString, compareStrPatternNotMatch, compareWStrPatternNotMatch
};
+__compar_fn_t gInt8SignCompare[] = {
+ compareInt8Val, compareInt8Int16, compareInt8Int32, compareInt8Int64, compareInt8Float, compareInt8Double
+};
+__compar_fn_t gInt8UsignCompare[] = {
+ compareInt8Uint8, compareInt8Uint16, compareInt8Uint32, compareInt8Uint64
+};
+
+__compar_fn_t gInt16SignCompare[] = {
+ compareInt16Int8, compareInt16Val, compareInt16Int32, compareInt16Int64, compareInt16Float, compareInt16Double
+};
+__compar_fn_t gInt16UsignCompare[] = {
+ compareInt16Uint8, compareInt16Uint16, compareInt16Uint32, compareInt16Uint64
+};
+
+__compar_fn_t gInt32SignCompare[] = {
+ compareInt32Int8, compareInt32Int16, compareInt32Val, compareInt32Int64, compareInt32Float, compareInt32Double
+};
+__compar_fn_t gInt32UsignCompare[] = {
+ compareInt32Uint8, compareInt32Uint16, compareInt32Uint32, compareInt32Uint64
+};
+
+__compar_fn_t gInt64SignCompare[] = {
+ compareInt64Int8, compareInt64Int16, compareInt64Int32, compareInt64Val, compareInt64Float, compareInt64Double
+};
+__compar_fn_t gInt64UsignCompare[] = {
+ compareInt64Uint8, compareInt64Uint16, compareInt64Uint32, compareInt64Uint64
+};
+
+__compar_fn_t gFloatSignCompare[] = {
+ compareFloatInt8, compareFloatInt16, compareFloatInt32, compareFloatInt64, compareFloatVal, compareFloatDouble
+};
+__compar_fn_t gFloatUsignCompare[] = {
+ compareFloatUint8, compareFloatUint16, compareFloatUint32, compareFloatUint64
+};
+
+__compar_fn_t gDoubleSignCompare[] = {
+ compareDoubleInt8, compareDoubleInt16, compareDoubleInt32, compareDoubleInt64, compareDoubleFloat, compareDoubleVal
+};
+__compar_fn_t gDoubleUsignCompare[] = {
+ compareDoubleUint8, compareDoubleUint16, compareDoubleUint32, compareDoubleUint64
+};
+
+__compar_fn_t gUint8SignCompare[] = {
+ compareUint8Int8, compareUint8Int16, compareUint8Int32, compareUint8Int64, compareUint8Float, compareUint8Double
+};
+__compar_fn_t gUint8UsignCompare[] = {
+ compareUint8Val, compareUint8Uint16, compareUint8Uint32, compareUint8Uint64
+};
+
+__compar_fn_t gUint16SignCompare[] = {
+ compareUint16Int8, compareUint16Int16, compareUint16Int32, compareUint16Int64, compareUint16Float, compareUint16Double
+};
+__compar_fn_t gUint16UsignCompare[] = {
+ compareUint16Uint8, compareUint16Val, compareUint16Uint32, compareUint16Uint64
+};
+
+__compar_fn_t gUint32SignCompare[] = {
+ compareUint32Int8, compareUint32Int16, compareUint32Int32, compareUint32Int64, compareUint32Float, compareUint32Double
+};
+__compar_fn_t gUint32UsignCompare[] = {
+ compareUint32Uint8, compareUint32Uint16, compareUint32Val, compareUint32Uint64
+};
+
+__compar_fn_t gUint64SignCompare[] = {
+ compareUint64Int8, compareUint64Int16, compareUint64Int32, compareUint64Int64, compareUint64Float, compareUint64Double
+};
+__compar_fn_t gUint64UsignCompare[] = {
+ compareUint64Uint8, compareUint64Uint16, compareUint64Uint32, compareUint64Val
+};
+
+
int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
int8_t comparFn = 0;
@@ -257,6 +328,93 @@ __compar_fn_t filterGetCompFunc(int32_t type, int32_t optr) {
return gDataCompare[filterGetCompFuncIdx(type, optr)];
}
+__compar_fn_t filterGetCompFuncEx(int32_t lType, int32_t rType, int32_t optr) {
+ switch (lType) {
+ case TSDB_DATA_TYPE_TINYINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt8SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt8UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_SMALLINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt16SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt16UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_INT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt32SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt32UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_BIGINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt64SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt64UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_FLOAT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gFloatSignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gFloatUsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_DOUBLE: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gDoubleSignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gDoubleUsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UTINYINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint8SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint8UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_USMALLINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint16SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint16UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint32SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint32UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UBIGINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint64SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint64UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return NULL;
+}
static FORCE_INLINE int32_t filterCompareGroupCtx(const void *pLeft, const void *pRight) {
SFilterGroupCtx *left = *((SFilterGroupCtx**)pLeft), *right = *((SFilterGroupCtx**)pRight);
@@ -910,14 +1068,14 @@ int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *f
return TSDB_CODE_SUCCESS;
}
-int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint32_t *uidx) {
+int32_t filterAddUnitImpl(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint8_t optr2, SFilterFieldId *right2, uint32_t *uidx) {
if (FILTER_GET_FLAG(info->options, FLT_OPTION_NEED_UNIQE)) {
if (info->pctx.unitHash == NULL) {
info->pctx.unitHash = taosHashInit(FILTER_DEFAULT_GROUP_SIZE * FILTER_DEFAULT_UNIT_SIZE, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, false);
} else {
- int64_t v = 0;
- FILTER_PACKAGE_UNIT_HASH_KEY(&v, optr, left->idx, right ? right->idx : -1);
- void *hu = taosHashGet(info->pctx.unitHash, &v, sizeof(v));
+ char v[14] = {0};
+ FLT_PACKAGE_UNIT_HASH_KEY(&v, optr, optr2, left->idx, (right ? right->idx : -1), (right2 ? right2->idx : -1));
+ void *hu = taosHashGet(info->pctx.unitHash, v, sizeof(v));
if (hu) {
*uidx = *(uint32_t *)hu;
return TSDB_CODE_SUCCESS;
@@ -939,7 +1097,11 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
if (right) {
u->right = *right;
}
-
+ u->compare.optr2 = optr2;
+ if (right2) {
+ u->right2 = *right2;
+ }
+
if (u->right.type == FLD_TYPE_VALUE) {
SFilterField *val = FILTER_UNIT_RIGHT_FIELD(info, u);
assert(FILTER_GET_FLAG(val->flag, FLD_TYPE_VALUE));
@@ -960,9 +1122,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
*uidx = info->unitNum;
if (FILTER_GET_FLAG(info->options, FLT_OPTION_NEED_UNIQE)) {
- int64_t v = 0;
- FILTER_PACKAGE_UNIT_HASH_KEY(&v, optr, left->idx, right ? right->idx : -1);
- taosHashPut(info->pctx.unitHash, &v, sizeof(v), uidx, sizeof(*uidx));
+ char v[14] = {0};
+ FLT_PACKAGE_UNIT_HASH_KEY(&v, optr, optr2, left->idx, (right ? right->idx : -1), (right2 ? right2->idx : -1));
+ taosHashPut(info->pctx.unitHash, v, sizeof(v), uidx, sizeof(*uidx));
}
++info->unitNum;
@@ -971,6 +1133,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
}
+int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint32_t *uidx) {
+ return filterAddUnitImpl(info, optr, left, right, 0, NULL, uidx);
+}
int32_t filterAddUnitToGroup(SFilterGroup *group, uint32_t unitIdx) {
if (group->unitNum >= group->unitSize) {
@@ -1147,8 +1312,8 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan
SIMPLE_COPY_VALUES(data2, &ra->e);
filterAddField(dst, NULL, &data2, FLD_TYPE_VALUE, &right2, tDataTypes[type].bytes, true);
- filterAddUnit(dst, FILTER_GET_FLAG(ra->sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx);
- filterAddUnitRight(dst, FILTER_GET_FLAG(ra->eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, uidx);
+ filterAddUnitImpl(dst, FILTER_GET_FLAG(ra->sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right,
+ FILTER_GET_FLAG(ra->eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, &uidx);
filterAddUnitToGroup(g, uidx);
return TSDB_CODE_SUCCESS;
}
@@ -1222,8 +1387,8 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan
SIMPLE_COPY_VALUES(data2, &r->ra.e);
filterAddField(dst, NULL, &data2, FLD_TYPE_VALUE, &right2, tDataTypes[type].bytes, true);
- filterAddUnit(dst, FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx);
- filterAddUnitRight(dst, FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, uidx);
+ filterAddUnitImpl(dst, FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right,
+ FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, &uidx);
filterAddUnitToGroup(g, uidx);
}
@@ -2073,6 +2238,44 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t
return TSDB_CODE_SUCCESS;
}
+bool filterIsSameUnits(SFilterColInfo* pCol1, SFilterColInfo* pCol2) {
+ if (pCol1->type != pCol2->type) {
+ return false;
+ }
+
+ if (RANGE_TYPE_MR_CTX == pCol1->type) {
+ SFilterRangeCtx* pCtx1 = (SFilterRangeCtx*)pCol1->info;
+ SFilterRangeCtx* pCtx2 = (SFilterRangeCtx*)pCol2->info;
+
+ if ((pCtx1->isnull != pCtx2->isnull) || (pCtx1->notnull != pCtx2->notnull) || (pCtx1->isrange != pCtx2->isrange)) {
+ return false;
+ }
+
+
+ SFilterRangeNode* pNode1 = pCtx1->rs;
+ SFilterRangeNode* pNode2 = pCtx2->rs;
+
+ while (true) {
+ if (NULL == pNode1 && NULL == pNode2) {
+ break;
+ }
+
+ if (NULL == pNode1 || NULL == pNode2) {
+ return false;
+ }
+
+ if (pNode1->ra.s != pNode2->ra.s || pNode1->ra.e != pNode2->ra.e || pNode1->ra.sflag != pNode2->ra.sflag || pNode1->ra.eflag != pNode2->ra.eflag) {
+ return false;
+ }
+
+ pNode1 = pNode1->next;
+ pNode2 = pNode2->next;
+ }
+ }
+
+ return true;
+}
+
void filterCheckColConflict(SFilterGroupCtx* gRes1, SFilterGroupCtx* gRes2, bool *conflict) {
uint32_t idx1 = 0, idx2 = 0, m = 0, n = 0;
bool equal = false;
@@ -2098,6 +2301,11 @@ void filterCheckColConflict(SFilterGroupCtx* gRes1, SFilterGroupCtx* gRes2, bool
return;
}
+ if (!filterIsSameUnits(&gRes1->colInfo[idx1], &gRes2->colInfo[idx2])) {
+ *conflict = true;
+ return;
+ }
+
// for long in operation
if (gRes1->colInfo[idx1].optr == OP_TYPE_EQUAL && gRes2->colInfo[idx2].optr == OP_TYPE_EQUAL) {
SFilterRangeCtx* ctx = gRes1->colInfo[idx1].info;
@@ -2711,17 +2919,22 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3
for (uint32_t g = 0; g < info->groupNum; ++g) {
SFilterGroup *group = &info->groups[g];
+ // first is block unit num for a group, following append unitNum blkUnitIdx for this group
*unitNum = group->unitNum;
all = 0;
empty = 0;
+ // save group idx start pointer
+ uint32_t * pGroupIdx = unitIdx;
for (uint32_t u = 0; u < group->unitNum; ++u) {
uint32_t uidx = group->unitIdxs[u];
if (info->blkUnitRes[uidx] == 1) {
+ // blkUnitRes == 1 is always true, so no need to compare every time; delete this unit from the group
--(*unitNum);
all = 1;
continue;
} else if (info->blkUnitRes[uidx] == -1) {
+ // blkUnitRes == -1 is always false, so the whole group is always false; need to delete this group from blkGroupNum
*unitNum = 0;
empty = 1;
break;
@@ -2731,6 +2944,9 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3
}
if (*unitNum == 0) {
+ // if unit num is zero, reset unitIdx to start on this group
+ unitIdx = pGroupIdx;
+
--info->blkGroupNum;
assert(empty || all);
diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c
index aaa70ef5ae..a003315fca 100644
--- a/source/libs/scalar/src/sclvector.c
+++ b/source/libs/scalar/src/sclvector.c
@@ -909,11 +909,11 @@ int32_t vectorConvertImpl(const SScalarParam* pIn, SScalarParam* pOut, int32_t*
int8_t gConvertTypes[TSDB_DATA_TYPE_BLOB+1][TSDB_DATA_TYPE_BLOB+1] = {
/* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB */
/*NULL*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-/*BOOL*/ 0, 0, 0, 3, 4, 5, 6, 7, 7, 9, 7, 0, 12, 13, 14, 0, 7, 0, 0,
+/*BOOL*/ 0, 0, 2, 3, 4, 5, 6, 7, 7, 9, 7, 11, 12, 13, 14, 0, 7, 0, 0,
/*TINY*/ 0, 0, 0, 3, 4, 5, 6, 7, 7, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0,
/*SMAL*/ 0, 0, 0, 0, 4, 5, 6, 7, 7, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0,
/*INT */ 0, 0, 0, 0, 0, 5, 6, 7, 7, 9, 7, 4, 4, 5, 7, 0, 7, 0, 0,
-/*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 7, 0, 7, 5, 5, 5, 7, 0, 7, 0, 0,
+/*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 7, 9, 7, 5, 5, 5, 7, 0, 7, 0, 0,
/*FLOA*/ 0, 0, 0, 0, 0, 0, 0, 7, 7, 6, 7, 6, 6, 6, 6, 0, 7, 0, 0,
/*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 0, 7, 0, 0,
/*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 0, 0, 0,
@@ -1681,10 +1681,14 @@ void vectorBitOr(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut,
void vectorCompareImpl(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord, int32_t optr) {
int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : TMAX(pLeft->numOfRows, pRight->numOfRows) - 1;
int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1;
-
- __compar_fn_t fp = filterGetCompFunc(GET_PARAM_TYPE(pLeft), optr);
- if(terrno != TSDB_CODE_SUCCESS){
- return;
+ int32_t lType = GET_PARAM_TYPE(pLeft);
+ int32_t rType = GET_PARAM_TYPE(pRight);
+ __compar_fn_t fp = NULL;
+
+ if (lType == rType) {
+ fp = filterGetCompFunc(lType, optr);
+ } else {
+ fp = filterGetCompFuncEx(lType, rType, optr);
}
pOut->numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows);
@@ -1716,22 +1720,26 @@ void vectorCompareImpl(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *
void vectorCompare(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord, int32_t optr) {
SScalarParam pLeftOut = {0};
SScalarParam pRightOut = {0};
-
- vectorConvert(pLeft, pRight, &pLeftOut, &pRightOut);
-
SScalarParam *param1 = NULL;
SScalarParam *param2 = NULL;
- if (pLeftOut.columnData != NULL) {
- param1 = &pLeftOut;
- } else {
+ if (SCL_NO_NEED_CONVERT_COMPARISION(GET_PARAM_TYPE(pLeft), GET_PARAM_TYPE(pRight), optr)) {
param1 = pLeft;
- }
-
- if (pRightOut.columnData != NULL) {
- param2 = &pRightOut;
- } else {
param2 = pRight;
+ } else {
+ vectorConvert(pLeft, pRight, &pLeftOut, &pRightOut);
+
+ if (pLeftOut.columnData != NULL) {
+ param1 = &pLeftOut;
+ } else {
+ param1 = pLeft;
+ }
+
+ if (pRightOut.columnData != NULL) {
+ param2 = &pRightOut;
+ } else {
+ param2 = pRight;
+ }
}
vectorCompareImpl(param1, param2, pOut, _ord, optr);
diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c
index 4701318779..1480920f90 100644
--- a/source/libs/tdb/src/db/tdbBtree.c
+++ b/source/libs/tdb/src/db/tdbBtree.c
@@ -509,7 +509,7 @@ static int tdbBtreeBalanceDeeper(SBTree *pBt, SPage *pRoot, SPage **ppChild, TXN
static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTxn) {
int ret;
- int nOlds;
+ int nOlds, pageIdx;
SPage *pOlds[3] = {0};
SCell *pDivCell[3] = {0};
int szDivCell[3];
@@ -849,13 +849,11 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
}
}
- // TODO: here is not corrent for drop case
- for (int i = 0; i < nNews; i++) {
- if (i < nOlds) {
- tdbPagerReturnPage(pBt->pPager, pOlds[i], pTxn);
- } else {
- tdbPagerReturnPage(pBt->pPager, pNews[i], pTxn);
- }
+ for (pageIdx = 0; pageIdx < nOlds; ++pageIdx) {
+ tdbPagerReturnPage(pBt->pPager, pOlds[pageIdx], pTxn);
+ }
+ for (; pageIdx < nNews; ++pageIdx) {
+ tdbPagerReturnPage(pBt->pPager, pNews[pageIdx], pTxn);
}
return 0;
diff --git a/source/libs/tdb/src/db/tdbPCache.c b/source/libs/tdb/src/db/tdbPCache.c
index 76d95cbb91..6254158591 100644
--- a/source/libs/tdb/src/db/tdbPCache.c
+++ b/source/libs/tdb/src/db/tdbPCache.c
@@ -98,6 +98,7 @@ SPage *tdbPCacheFetch(SPCache *pCache, const SPgid *pPgid, TXN *pTxn) {
// printf("thread %" PRId64 " fetch page %d pgno %d pPage %p nRef %d\n", taosGetSelfPthreadId(), pPage->id,
// TDB_PAGE_PGNO(pPage), pPage, nRef);
+ tdbDebug("pcache/fetch page %p/%d/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id, nRef);
return pPage;
}
diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c
index 4de99e8b1b..f90c392788 100644
--- a/source/libs/tdb/src/db/tdbPager.c
+++ b/source/libs/tdb/src/db/tdbPager.c
@@ -166,6 +166,7 @@ int tdbPagerWrite(SPager *pPager, SPage *pPage) {
// ref page one more time so the page will not be release
tdbRefPage(pPage);
+ tdbDebug("pcache/mdirty page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id);
// Set page as dirty
pPage->isDirty = 1;
diff --git a/source/libs/tdb/src/inc/tdbInt.h b/source/libs/tdb/src/inc/tdbInt.h
index 49126b80b6..6a694cf8f1 100644
--- a/source/libs/tdb/src/inc/tdbInt.h
+++ b/source/libs/tdb/src/inc/tdbInt.h
@@ -280,13 +280,13 @@ struct SPage {
static inline i32 tdbRefPage(SPage *pPage) {
i32 nRef = atomic_add_fetch_32(&((pPage)->nRef), 1);
- tdbTrace("ref page %d, nRef %d", pPage->id, nRef);
+ tdbTrace("ref page %p/%d, nRef %d", pPage, pPage->id, nRef);
return nRef;
}
static inline i32 tdbUnrefPage(SPage *pPage) {
i32 nRef = atomic_sub_fetch_32(&((pPage)->nRef), 1);
- tdbTrace("unref page %d, nRef %d", pPage->id, nRef);
+ tdbTrace("unref page %p/%d, nRef %d", pPage, pPage->id, nRef);
return nRef;
}
diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c
index 386ea95dd7..e880a5abdb 100644
--- a/source/libs/transport/src/thttp.c
+++ b/source/libs/transport/src/thttp.c
@@ -155,6 +155,8 @@ static void clientSentCb(uv_write_t* req, int32_t status) {
if (status != 0) {
terrno = TAOS_SYSTEM_ERROR(status);
uError("http-report failed to send data %s", uv_strerror(status));
+ uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
+ return;
} else {
uTrace("http-report succ to send data");
}
diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index 7052b0b915..41688c7330 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -16,7 +16,7 @@
#include "transComm.h"
typedef struct SConnList {
- queue conn;
+ queue conns;
int32_t size;
} SConnList;
@@ -107,11 +107,11 @@ static void doCloseIdleConn(void* param);
static void cliReadTimeoutCb(uv_timer_t* handle);
// register timer in each thread to clear expire conn
// static void cliTimeoutCb(uv_timer_t* handle);
-// alloc buf for recv
+// alloc buffer for recv
static void cliAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf);
-// callback after read nbytes from socket
+// callback after recv nbytes from socket
static void cliRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf);
-// callback after write data to socket
+// callback after send data to socket
static void cliSendCb(uv_write_t* req, int status);
// callback after conn to server
static void cliConnCb(uv_connect_t* req, int status);
@@ -129,19 +129,14 @@ static SCliConn* cliCreateConn(SCliThrd* thrd);
static void cliDestroyConn(SCliConn* pConn, bool clear /*clear tcp handle or not*/);
static void cliDestroy(uv_handle_t* handle);
static void cliSend(SCliConn* pConn);
+static void cliDestroyConnMsgs(SCliConn* conn, bool destroy);
-static bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx) {
- if (code != 0) return false;
- if (pCtx->retryCnt == 0) return false;
- if (transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) return false;
- return true;
-}
+// cli util func
+static bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx);
+static void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr);
+
+static int32_t cliBuildExceptResp(SCliMsg* pMsg, STransMsg* resp);
-void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr);
-/*
- * set TCP connection timeout per-socket level
- */
-static int cliCreateSocket();
// process data read from server, add decompress etc later
static void cliHandleResp(SCliConn* conn);
// handle except about conn
@@ -169,15 +164,14 @@ static void destroyThrdObj(SCliThrd* pThrd);
static void cliWalkCb(uv_handle_t* handle, void* arg);
static void cliReleaseUnfinishedMsg(SCliConn* conn) {
- SCliMsg* pMsg = NULL;
for (int i = 0; i < transQueueSize(&conn->cliMsgs); i++) {
- pMsg = transQueueGet(&conn->cliMsgs, i);
- if (pMsg != NULL && pMsg->ctx != NULL) {
- if (conn->ctx.freeFunc != NULL) {
- conn->ctx.freeFunc(pMsg->ctx->ahandle);
+ SCliMsg* msg = transQueueGet(&conn->cliMsgs, i);
+ if (msg != NULL && msg->ctx != NULL) {
+ if (conn->ctx.freeFunc != NULL && msg->ctx->ahandle != NULL) {
+ conn->ctx.freeFunc(msg->ctx->ahandle);
}
}
- destroyCmsg(pMsg);
+ destroyCmsg(msg);
}
}
#define CLI_RELEASE_UV(loop) \
@@ -217,8 +211,10 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
} \
if (i == sz) { \
pMsg = NULL; \
+ tDebug("msg not found, %" PRIu64 "", ahandle); \
} else { \
pMsg = transQueueRm(&conn->cliMsgs, i); \
+ tDebug("msg found, %" PRIu64 "", ahandle); \
} \
} while (0)
#define CONN_GET_NEXT_SENDMSG(conn) \
@@ -470,8 +466,8 @@ void* createConnPool(int size) {
void* destroyConnPool(void* pool) {
SConnList* connList = taosHashIterate((SHashObj*)pool, NULL);
while (connList != NULL) {
- while (!QUEUE_IS_EMPTY(&connList->conn)) {
- queue* h = QUEUE_HEAD(&connList->conn);
+ while (!QUEUE_IS_EMPTY(&connList->conns)) {
+ queue* h = QUEUE_HEAD(&connList->conns);
SCliConn* c = QUEUE_DATA(h, SCliConn, q);
cliDestroyConn(c, true);
}
@@ -484,21 +480,21 @@ void* destroyConnPool(void* pool) {
static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port) {
char key[32] = {0};
CONN_CONSTRUCT_HASH_KEY(key, ip, port);
- SHashObj* pPool = pool;
- SConnList* plist = taosHashGet(pPool, key, strlen(key));
+
+ SConnList* plist = taosHashGet((SHashObj*)pool, key, strlen(key));
if (plist == NULL) {
SConnList list = {0};
- taosHashPut(pPool, key, strlen(key), (void*)&list, sizeof(list));
- plist = taosHashGet(pPool, key, strlen(key));
- QUEUE_INIT(&plist->conn);
+ taosHashPut((SHashObj*)pool, key, strlen(key), (void*)&list, sizeof(list));
+ plist = taosHashGet((SHashObj*)pool, key, strlen(key));
+ QUEUE_INIT(&plist->conns);
}
- if (QUEUE_IS_EMPTY(&plist->conn)) {
+ if (QUEUE_IS_EMPTY(&plist->conns)) {
return NULL;
}
plist->size -= 1;
- queue* h = QUEUE_HEAD(&plist->conn);
+ queue* h = QUEUE_HEAD(&plist->conns);
SCliConn* conn = QUEUE_DATA(h, SCliConn, q);
conn->status = ConnNormal;
QUEUE_REMOVE(&conn->q);
@@ -514,22 +510,21 @@ static void addConnToPool(void* pool, SCliConn* conn) {
if (conn->status == ConnInPool) {
return;
}
- SCliThrd* thrd = conn->hostThrd;
- CONN_HANDLE_THREAD_QUIT(thrd);
-
allocConnRef(conn, true);
+ SCliThrd* thrd = conn->hostThrd;
if (conn->timer != NULL) {
uv_timer_stop(conn->timer);
taosArrayPush(thrd->timerList, &conn->timer);
conn->timer->data = NULL;
conn->timer = NULL;
}
+ if (T_REF_VAL_GET(conn) > 1) {
+ transUnrefCliHandle(conn);
+ }
+
+ cliDestroyConnMsgs(conn, false);
- STrans* pTransInst = thrd->pTransInst;
- cliReleaseUnfinishedMsg(conn);
- transQueueClear(&conn->cliMsgs);
- transCtxCleanup(&conn->ctx);
conn->status = ConnInPool;
if (conn->list == NULL) {
@@ -540,18 +535,15 @@ static void addConnToPool(void* pool, SCliConn* conn) {
} else {
tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap);
}
- assert(conn->list != NULL);
- QUEUE_INIT(&conn->q);
- QUEUE_PUSH(&conn->list->conn, &conn->q);
+ QUEUE_PUSH(&conn->list->conns, &conn->q);
conn->list->size += 1;
- conn->task = NULL;
- assert(!QUEUE_IS_EMPTY(&conn->list->conn));
-
if (conn->list->size >= 50) {
STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg));
arg->param1 = conn;
arg->param2 = thrd;
+
+ STrans* pTransInst = thrd->pTransInst;
conn->task = transDQSched(thrd->timeoutQueue, doCloseIdleConn, arg, CONN_PERSIST_TIME(pTransInst->idleTime));
}
}
@@ -691,11 +683,10 @@ static void cliDestroy(uv_handle_t* handle) {
transRemoveExHandle(transGetRefMgt(), conn->refId);
taosMemoryFree(conn->ip);
- conn->stream->data = NULL;
taosMemoryFree(conn->stream);
- transCtxCleanup(&conn->ctx);
- cliReleaseUnfinishedMsg(conn);
- transQueueDestroy(&conn->cliMsgs);
+
+ cliDestroyConnMsgs(conn, true);
+
tTrace("%s conn %p destroy successfully", CONN_GET_INST_LABEL(conn), conn);
transReqQueueClear(&conn->wreqQueue);
transDestroyBuffer(&conn->readBuf);
@@ -738,8 +729,6 @@ static void cliSendCb(uv_write_t* req, int status) {
}
void cliSend(SCliConn* pConn) {
- CONN_HANDLE_BROKEN(pConn);
-
assert(!transQueueEmpty(&pConn->cliMsgs));
SCliMsg* pCliMsg = NULL;
@@ -756,8 +745,8 @@ void cliSend(SCliConn* pConn) {
pMsg->pCont = (void*)rpcMallocCont(0);
pMsg->contLen = 0;
}
- int msgLen = transMsgLenFromCont(pMsg->contLen);
+ int msgLen = transMsgLenFromCont(pMsg->contLen);
STransMsgHead* pHead = transHeadFromCont(pMsg->pCont);
pHead->ahandle = pCtx != NULL ? (uint64_t)pCtx->ahandle : 0;
pHead->noResp = REQUEST_NO_RESP(pMsg) ? 1 : 0;
@@ -769,8 +758,6 @@ void cliSend(SCliConn* pConn) {
pHead->traceId = pMsg->info.traceId;
pHead->magicNum = htonl(TRANS_MAGIC_NUM);
- uv_buf_t wb = uv_buf_init((char*)pHead, msgLen);
-
STraceId* trace = &pMsg->info.traceId;
tGDebug("%s conn %p %s is sent to %s, local info %s, len:%d", CONN_GET_INST_LABEL(pConn), pConn,
TMSG_INFO(pHead->msgType), pConn->dst, pConn->src, pMsg->contLen);
@@ -792,6 +779,8 @@ void cliSend(SCliConn* pConn) {
tGTrace("%s conn %p start timer for msg:%s", CONN_GET_INST_LABEL(pConn), pConn, TMSG_INFO(pMsg->msgType));
uv_timer_start((uv_timer_t*)pConn->timer, cliReadTimeoutCb, TRANS_READ_TIMEOUT, 0);
}
+
+ uv_buf_t wb = uv_buf_init((char*)pHead, msgLen);
uv_write_t* req = transReqQueuePush(&pConn->wreqQueue);
uv_write(req, (uv_stream_t*)pConn->stream, &wb, 1, cliSendCb);
return;
@@ -807,7 +796,6 @@ void cliConnCb(uv_connect_t* req, int status) {
cliHandleExcept(pConn);
return;
}
- // int addrlen = sizeof(pConn->addr);
struct sockaddr peername, sockname;
int addrlen = sizeof(peername);
@@ -840,7 +828,7 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) {
int64_t refId = (int64_t)(pMsg->msg.info.handle);
SExHandle* exh = transAcquireExHandle(transGetRefMgt(), refId);
if (exh == NULL) {
- tDebug("%" PRId64 " already release", refId);
+ tDebug("%" PRId64 " already released", refId);
destroyCmsg(pMsg);
return;
}
@@ -856,6 +844,9 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) {
return;
}
cliSend(conn);
+ } else {
+ tError("%s conn %p already released", CONN_GET_INST_LABEL(conn), conn);
+ destroyCmsg(pMsg);
}
}
static void cliHandleUpdate(SCliMsg* pMsg, SCliThrd* pThrd) {
@@ -905,6 +896,27 @@ void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr) {
}
}
}
+
+bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx) {
+ if (code != 0) return false;
+ if (pCtx->retryCnt == 0) return false;
+ if (transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) return false;
+ return true;
+}
+
+int32_t cliBuildExceptResp(SCliMsg* pMsg, STransMsg* pResp) {
+ if (pMsg == NULL) return -1;
+
+ memset(pResp, 0, sizeof(STransMsg));
+
+ pResp->code = TSDB_CODE_RPC_BROKEN_LINK;
+ pResp->msgType = pMsg->msg.msgType + 1;
+ pResp->info.ahandle = pMsg->ctx ? pMsg->ctx->ahandle : NULL;
+ pResp->info.traceId = pMsg->msg.info.traceId;
+
+ return 0;
+}
+
void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
STrans* pTransInst = pThrd->pTransInst;
STransConnCtx* pCtx = pMsg->ctx;
@@ -920,13 +932,8 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
SCliConn* conn = cliGetConn(pMsg, pThrd, &ignore);
if (ignore == true) {
// persist conn already release by server
- STransMsg resp = {0};
- resp.code = TSDB_CODE_RPC_BROKEN_LINK;
- resp.msgType = pMsg->msg.msgType + 1;
-
- resp.info.ahandle = pMsg && pMsg->ctx ? pMsg->ctx->ahandle : NULL;
- resp.info.traceId = pMsg->msg.info.traceId;
-
+ STransMsg resp;
+ cliBuildExceptResp(pMsg, &resp);
pTransInst->cfp(pTransInst->parent, &resp, NULL);
destroyCmsg(pMsg);
return;
@@ -991,9 +998,6 @@ static void cliAsyncCb(uv_async_t* handle) {
QUEUE_REMOVE(h);
SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q);
- if (pMsg == NULL) {
- continue;
- }
(*cliAsyncHandle[pMsg->type])(pMsg, pThrd);
count++;
}
@@ -1035,24 +1039,58 @@ static void cliPrepareCb(uv_prepare_t* handle) {
if (thrd->stopMsg != NULL) cliHandleQuit(thrd->stopMsg, thrd);
}
+void cliDestroyConnMsgs(SCliConn* conn, bool destroy) {
+ transCtxCleanup(&conn->ctx);
+ cliReleaseUnfinishedMsg(conn);
+ if (destroy == 1) {
+ transQueueDestroy(&conn->cliMsgs);
+ } else {
+ transQueueClear(&conn->cliMsgs);
+ }
+}
+
+void cliIteraConnMsgs(SCliConn* conn) {
+ SCliThrd* pThrd = conn->hostThrd;
+ STrans* pTransInst = pThrd->pTransInst;
+
+ for (int i = 0; i < transQueueSize(&conn->cliMsgs); i++) {
+ SCliMsg* cmsg = transQueueGet(&conn->cliMsgs, i);
+ if (cmsg->type == Release || REQUEST_NO_RESP(&cmsg->msg) || cmsg->msg.msgType == TDMT_SCH_DROP_TASK) {
+ continue;
+ }
+
+ STransMsg resp = {0};
+ if (-1 == cliBuildExceptResp(cmsg, &resp)) {
+ continue;
+ }
+ pTransInst->cfp(pTransInst->parent, &resp, NULL);
+
+ cmsg->ctx->ahandle = NULL;
+ }
+}
bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead) {
if (pHead->release == 1 && (pHead->msgLen) == sizeof(*pHead)) {
uint64_t ahandle = pHead->ahandle;
+ tDebug("ahandle = %" PRIu64 "", ahandle);
SCliMsg* pMsg = NULL;
CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle);
+
transClearBuffer(&conn->readBuf);
transFreeMsg(transContFromHead((char*)pHead));
- if (transQueueSize(&conn->cliMsgs) > 0 && ahandle == 0) {
- SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, 0);
- if (cliMsg->type == Release) return true;
+
+ for (int i = 0; ahandle == 0 && i < transQueueSize(&conn->cliMsgs); i++) {
+ SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, i);
+ if (cliMsg->type == Release) {
+ assert(pMsg == NULL);
+ return true;
+ }
}
+
+ cliIteraConnMsgs(conn);
+
tDebug("%s conn %p receive release request, refId:%" PRId64 "", CONN_GET_INST_LABEL(conn), conn, conn->refId);
- if (T_REF_VAL_GET(conn) > 1) {
- transUnrefCliHandle(conn);
- }
destroyCmsg(pMsg);
- cliReleaseUnfinishedMsg(conn);
- transQueueClear(&conn->cliMsgs);
+
addConnToPool(((SCliThrd*)conn->hostThrd)->pool, conn);
return true;
}
diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c
index 207b967923..46046b2a95 100644
--- a/source/libs/transport/src/transSvr.c
+++ b/source/libs/transport/src/transSvr.c
@@ -492,7 +492,6 @@ void uvWorkerAsyncCb(uv_async_t* handle) {
// release handle to rpc init
if (msg->type == Quit) {
(*transAsyncHandle[msg->type])(msg, pThrd);
- continue;
} else {
STransMsg transMsg = msg->msg;
@@ -771,7 +770,7 @@ static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName) {
// conn set
QUEUE_INIT(&pThrd->conn);
- pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 1, pThrd, uvWorkerAsyncCb);
+ pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 5, pThrd, uvWorkerAsyncCb);
uv_pipe_connect(&pThrd->connect_req, pThrd->pipe, pipeName, uvOnPipeConnectionCb);
// uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb);
return true;
diff --git a/source/os/src/osSemaphore.c b/source/os/src/osSemaphore.c
index a95503b5e5..8cc6f0ef2e 100644
--- a/source/os/src/osSemaphore.c
+++ b/source/os/src/osSemaphore.c
@@ -1,531 +1,531 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#define ALLOW_FORBID_FUNC
-#define _DEFAULT_SOURCE
-#include "os.h"
-#include "pthread.h"
-#include "tdef.h"
-
-#ifdef WINDOWS
-
-/*
- * windows implementation
- */
-
-#include
-
-bool taosCheckPthreadValid(TdThread thread) { return thread.p != NULL; }
-
-void taosResetPthread(TdThread* thread) { thread->p = 0; }
-
-int64_t taosGetPthreadId(TdThread thread) {
-#ifdef PTW32_VERSION
- return pthread_getw32threadid_np(thread);
-#else
- return (int64_t)thread;
-#endif
-}
-
-int64_t taosGetSelfPthreadId() { return GetCurrentThreadId(); }
-
-bool taosComparePthread(TdThread first, TdThread second) { return first.p == second.p; }
-
-int32_t taosGetPId() { return GetCurrentProcessId(); }
-
-int32_t taosGetAppName(char* name, int32_t* len) {
- char filepath[1024] = {0};
-
- GetModuleFileName(NULL, filepath, MAX_PATH);
- char* sub = strrchr(filepath, '.');
- if (sub != NULL) {
- *sub = '\0';
- }
- char* end = strrchr(filepath, TD_DIRSEP[0]);
- if (end == NULL) {
- end = filepath;
- }
-
- tstrncpy(name, end, TSDB_APP_NAME_LEN);
-
- if (len != NULL) {
- *len = (int32_t)strlen(end);
- }
-
- return 0;
-}
-
-int32_t tsem_wait(tsem_t* sem) {
- int ret = 0;
- do {
- ret = sem_wait(sem);
- } while (ret != 0 && errno == EINTR);
- return ret;
-}
-
-int32_t tsem_timewait(tsem_t* sem, int64_t nanosecs) {
- struct timespec ts, rel;
- FILETIME ft_before, ft_after;
- int rc;
-
- rel.tv_sec = 0;
- rel.tv_nsec = nanosecs;
-
- GetSystemTimeAsFileTime(&ft_before);
- // errno = 0;
- rc = sem_timedwait(sem, pthread_win32_getabstime_np(&ts, &rel));
-
- /* This should have timed out */
- // assert(errno == ETIMEDOUT);
- // assert(rc != 0);
- // GetSystemTimeAsFileTime(&ft_after);
- // // We specified a non-zero wait. Time must advance.
- // if (ft_before.dwLowDateTime == ft_after.dwLowDateTime && ft_before.dwHighDateTime == ft_after.dwHighDateTime)
- // {
- // printf("nanoseconds: %d, rc: %d, code:0x%x. before filetime: %d, %d; after filetime: %d, %d\n",
- // nanosecs, rc, errno,
- // (int)ft_before.dwLowDateTime, (int)ft_before.dwHighDateTime,
- // (int)ft_after.dwLowDateTime, (int)ft_after.dwHighDateTime);
- // printf("time must advance during sem_timedwait.");
- // return 1;
- // }
- return rc;
-}
-
-#elif defined(_TD_DARWIN_64)
-
-/*
- * darwin implementation
- */
-
-#include
-
-// #define SEM_USE_PTHREAD
-// #define SEM_USE_POSIX
-// #define SEM_USE_SEM
-
-// #ifdef SEM_USE_SEM
-// #include
-// #include
-// #include
-// #include
-
-// static TdThread sem_thread;
-// static TdThreadOnce sem_once;
-// static task_t sem_port;
-// static volatile int sem_inited = 0;
-// static semaphore_t sem_exit;
-
-// static void *sem_thread_routine(void *arg) {
-// (void)arg;
-// setThreadName("sem_thrd");
-
-// sem_port = mach_task_self();
-// kern_return_t ret = semaphore_create(sem_port, &sem_exit, SYNC_POLICY_FIFO, 0);
-// if (ret != KERN_SUCCESS) {
-// fprintf(stderr, "==%s[%d]%s()==failed to create sem_exit\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__);
-// sem_inited = -1;
-// return NULL;
-// }
-// sem_inited = 1;
-// semaphore_wait(sem_exit);
-// return NULL;
-// }
-
-// static void once_init(void) {
-// int r = 0;
-// r = taosThreadCreate(&sem_thread, NULL, sem_thread_routine, NULL);
-// if (r) {
-// fprintf(stderr, "==%s[%d]%s()==failed to create thread\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__);
-// return;
-// }
-// while (sem_inited == 0) {
-// ;
-// }
-// }
-// #endif
-
-// struct tsem_s {
-// #ifdef SEM_USE_PTHREAD
-// TdThreadMutex lock;
-// TdThreadCond cond;
-// volatile int64_t val;
-// #elif defined(SEM_USE_POSIX)
-// size_t id;
-// sem_t *sem;
-// #elif defined(SEM_USE_SEM)
-// semaphore_t sem;
-// #else // SEM_USE_PTHREAD
-// dispatch_semaphore_t sem;
-// #endif // SEM_USE_PTHREAD
-
-// volatile unsigned int valid : 1;
-// };
-
-// int tsem_init(tsem_t *sem, int pshared, unsigned int value) {
-// // fprintf(stderr, "==%s[%d]%s():[%p]==creating\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
-// if (*sem) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==already initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// struct tsem_s *p = (struct tsem_s *)taosMemoryCalloc(1, sizeof(*p));
-// if (!p) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==out of memory\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
-// abort();
-// }
-
-// #ifdef SEM_USE_PTHREAD
-// int r = taosThreadMutexInit(&p->lock, NULL);
-// do {
-// if (r) break;
-// r = taosThreadCondInit(&p->cond, NULL);
-// if (r) {
-// taosThreadMutexDestroy(&p->lock);
-// break;
-// }
-// p->val = value;
-// } while (0);
-// if (r) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==not created\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
-// abort();
-// }
-// #elif defined(SEM_USE_POSIX)
-// static size_t tick = 0;
-// do {
-// size_t id = atomic_add_fetch_64(&tick, 1);
-// if (id == SEM_VALUE_MAX) {
-// atomic_store_64(&tick, 0);
-// id = 0;
-// }
-// char name[NAME_MAX - 4];
-// snprintf(name, sizeof(name), "/t" PRId64, id);
-// p->sem = sem_open(name, O_CREAT | O_EXCL, pshared, value);
-// p->id = id;
-// if (p->sem != SEM_FAILED) break;
-// int e = errno;
-// if (e == EEXIST) continue;
-// if (e == EINTR) continue;
-// fprintf(stderr, "==%s[%d]%s():[%p]==not created[%d]%s\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem,
-// e, strerror(e));
-// abort();
-// } while (p->sem == SEM_FAILED);
-// #elif defined(SEM_USE_SEM)
-// taosThreadOnce(&sem_once, once_init);
-// if (sem_inited != 1) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal resource init failed\n", taosDirEntryBaseName(__FILE__), __LINE__,
-// __func__, sem);
-// errno = ENOMEM;
-// return -1;
-// }
-// kern_return_t ret = semaphore_create(sem_port, &p->sem, SYNC_POLICY_FIFO, value);
-// if (ret != KERN_SUCCESS) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==semophore_create failed\n", taosDirEntryBaseName(__FILE__), __LINE__,
-// __func__,
-// sem);
-// // we fail-fast here, because we have less-doc about semaphore_create for the moment
-// abort();
-// }
-// #else // SEM_USE_PTHREAD
-// p->sem = dispatch_semaphore_create(value);
-// if (p->sem == NULL) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==not created\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
-// abort();
-// }
-// #endif // SEM_USE_PTHREAD
-
-// p->valid = 1;
-
-// *sem = p;
-
-// return 0;
-// }
-
-// int tsem_wait(tsem_t *sem) {
-// if (!*sem) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
-// abort();
-// }
-// struct tsem_s *p = *sem;
-// if (!p->valid) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem); abort();
-// }
-// #ifdef SEM_USE_PTHREAD
-// if (taosThreadMutexLock(&p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// p->val -= 1;
-// if (p->val < 0) {
-// if (taosThreadCondWait(&p->cond, &p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__,
-// __func__,
-// sem);
-// abort();
-// }
-// }
-// if (taosThreadMutexUnlock(&p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// return 0;
-// #elif defined(SEM_USE_POSIX)
-// return sem_wait(p->sem);
-// #elif defined(SEM_USE_SEM)
-// return semaphore_wait(p->sem);
-// #else // SEM_USE_PTHREAD
-// return dispatch_semaphore_wait(p->sem, DISPATCH_TIME_FOREVER);
-// #endif // SEM_USE_PTHREAD
-// }
-
-// int tsem_post(tsem_t *sem) {
-// if (!*sem) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
-// abort();
-// }
-// struct tsem_s *p = *sem;
-// if (!p->valid) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem); abort();
-// }
-// #ifdef SEM_USE_PTHREAD
-// if (taosThreadMutexLock(&p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// p->val += 1;
-// if (p->val <= 0) {
-// if (taosThreadCondSignal(&p->cond)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__,
-// __func__,
-// sem);
-// abort();
-// }
-// }
-// if (taosThreadMutexUnlock(&p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// return 0;
-// #elif defined(SEM_USE_POSIX)
-// return sem_post(p->sem);
-// #elif defined(SEM_USE_SEM)
-// return semaphore_signal(p->sem);
-// #else // SEM_USE_PTHREAD
-// return dispatch_semaphore_signal(p->sem);
-// #endif // SEM_USE_PTHREAD
-// }
-
-// int tsem_destroy(tsem_t *sem) {
-// // fprintf(stderr, "==%s[%d]%s():[%p]==destroying\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
-// if (!*sem) {
-// // fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// // abort();
-// return 0;
-// }
-// struct tsem_s *p = *sem;
-// if (!p->valid) {
-// // fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// // sem); abort();
-// return 0;
-// }
-// #ifdef SEM_USE_PTHREAD
-// if (taosThreadMutexLock(&p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// p->valid = 0;
-// if (taosThreadCondDestroy(&p->cond)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// if (taosThreadMutexUnlock(&p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// if (taosThreadMutexDestroy(&p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// #elif defined(SEM_USE_POSIX)
-// char name[NAME_MAX - 4];
-// snprintf(name, sizeof(name), "/t" PRId64, p->id);
-// int r = sem_unlink(name);
-// if (r) {
-// int e = errno;
-// fprintf(stderr, "==%s[%d]%s():[%p]==unlink failed[%d]%s\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem,
-// e, strerror(e));
-// abort();
-// }
-// #elif defined(SEM_USE_SEM)
-// semaphore_destroy(sem_port, p->sem);
-// #else // SEM_USE_PTHREAD
-// #endif // SEM_USE_PTHREAD
-
-// p->valid = 0;
-// taosMemoryFree(p);
-
-// *sem = NULL;
-// return 0;
-// }
-
-int tsem_init(tsem_t *psem, int flags, unsigned int count) {
- *psem = dispatch_semaphore_create(count);
- if (*psem == NULL) return -1;
- return 0;
-}
-
-int tsem_destroy(tsem_t *psem) {
- return 0;
-}
-
-int tsem_post(tsem_t *psem) {
- if (psem == NULL || *psem == NULL) return -1;
- dispatch_semaphore_signal(*psem);
- return 0;
-}
-
-int tsem_wait(tsem_t *psem) {
- if (psem == NULL || *psem == NULL) return -1;
- dispatch_semaphore_wait(*psem, DISPATCH_TIME_FOREVER);
- return 0;
-}
-
-int tsem_timewait(tsem_t *psem, int64_t nanosecs) {
- if (psem == NULL || *psem == NULL) return -1;
- dispatch_semaphore_wait(*psem, nanosecs);
- return 0;
-}
-
-bool taosCheckPthreadValid(TdThread thread) {
- int32_t ret = taosThreadKill(thread, 0);
- if (ret == ESRCH) return false;
- if (ret == EINVAL) return false;
- // alive
- return true;
-}
-
-int64_t taosGetSelfPthreadId() {
- TdThread thread = taosThreadSelf();
- return (int64_t)thread;
-}
-
-int64_t taosGetPthreadId(TdThread thread) { return (int64_t)thread; }
-
-void taosResetPthread(TdThread *thread) { *thread = NULL; }
-
-bool taosComparePthread(TdThread first, TdThread second) { return taosThreadEqual(first, second) ? true : false; }
-
-int32_t taosGetPId() { return (int32_t)getpid(); }
-
-int32_t taosGetAppName(char *name, int32_t *len) {
- char buf[PATH_MAX + 1];
- buf[0] = '\0';
- proc_name(getpid(), buf, sizeof(buf) - 1);
- buf[PATH_MAX] = '\0';
- size_t n = strlen(buf);
- if (len) *len = n;
- if (name) tstrncpy(name, buf, TSDB_APP_NAME_LEN);
- return 0;
-}
-
-#else
-
-/*
- * linux implementation
- */
-
-#include
-#include
-
-bool taosCheckPthreadValid(TdThread thread) { return thread != 0; }
-
-int64_t taosGetSelfPthreadId() {
- static __thread int id = 0;
- if (id != 0) return id;
- id = syscall(SYS_gettid);
- return id;
-}
-
-int64_t taosGetPthreadId(TdThread thread) { return (int64_t)thread; }
-void taosResetPthread(TdThread* thread) { *thread = 0; }
-bool taosComparePthread(TdThread first, TdThread second) { return first == second; }
-
-int32_t taosGetPId() {
- static int32_t pid;
- if (pid != 0) return pid;
- pid = getpid();
- return pid;
-}
-
-int32_t taosGetAppName(char* name, int32_t* len) {
- const char* self = "/proc/self/exe";
- char path[PATH_MAX] = {0};
-
- if (readlink(self, path, PATH_MAX) <= 0) {
- return -1;
- }
-
- path[PATH_MAX - 1] = 0;
- char* end = strrchr(path, '/');
- if (end == NULL) {
- return -1;
- }
-
- ++end;
-
- tstrncpy(name, end, TSDB_APP_NAME_LEN);
-
- if (len != NULL) {
- *len = strlen(name);
- }
-
- return 0;
-}
-
-int32_t tsem_wait(tsem_t* sem) {
- int ret = 0;
- do {
- ret = sem_wait(sem);
- } while (ret != 0 && errno == EINTR);
- return ret;
-}
-
-int32_t tsem_timewait(tsem_t* sem, int64_t nanosecs) {
- int ret = 0;
-
- struct timespec tv = {
- .tv_sec = 0,
- .tv_nsec = nanosecs,
- };
-
- while ((ret = sem_timedwait(sem, &tv)) == -1 && errno == EINTR) continue;
-
- return ret;
-}
-
-#endif
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#define ALLOW_FORBID_FUNC
+#define _DEFAULT_SOURCE
+#include "os.h"
+#include "pthread.h"
+#include "tdef.h"
+
+#ifdef WINDOWS
+
+/*
+ * windows implementation
+ */
+
+#include
+
+bool taosCheckPthreadValid(TdThread thread) { return thread.p != NULL; }
+
+void taosResetPthread(TdThread* thread) { thread->p = 0; }
+
+int64_t taosGetPthreadId(TdThread thread) {
+#ifdef PTW32_VERSION
+ return pthread_getw32threadid_np(thread);
+#else
+ return (int64_t)thread;
+#endif
+}
+
+int64_t taosGetSelfPthreadId() { return GetCurrentThreadId(); }
+
+bool taosComparePthread(TdThread first, TdThread second) { return first.p == second.p; }
+
+int32_t taosGetPId() { return GetCurrentProcessId(); }
+
+int32_t taosGetAppName(char* name, int32_t* len) {
+ char filepath[1024] = {0};
+
+ GetModuleFileName(NULL, filepath, MAX_PATH);
+ char* sub = strrchr(filepath, '.');
+ if (sub != NULL) {
+ *sub = '\0';
+ }
+ char* end = strrchr(filepath, TD_DIRSEP[0]);
+ if (end == NULL) {
+ end = filepath;
+ }
+
+ tstrncpy(name, end, TSDB_APP_NAME_LEN);
+
+ if (len != NULL) {
+ *len = (int32_t)strlen(end);
+ }
+
+ return 0;
+}
+
+int32_t tsem_wait(tsem_t* sem) {
+ int ret = 0;
+ do {
+ ret = sem_wait(sem);
+ } while (ret != 0 && errno == EINTR);
+ return ret;
+}
+
+int32_t tsem_timewait(tsem_t* sem, int64_t nanosecs) {
+ struct timespec ts, rel;
+ FILETIME ft_before, ft_after;
+ int rc;
+
+ rel.tv_sec = 0;
+ rel.tv_nsec = nanosecs;
+
+ GetSystemTimeAsFileTime(&ft_before);
+ // errno = 0;
+ rc = sem_timedwait(sem, pthread_win32_getabstime_np(&ts, &rel));
+
+ /* This should have timed out */
+ // assert(errno == ETIMEDOUT);
+ // assert(rc != 0);
+ // GetSystemTimeAsFileTime(&ft_after);
+ // // We specified a non-zero wait. Time must advance.
+ // if (ft_before.dwLowDateTime == ft_after.dwLowDateTime && ft_before.dwHighDateTime == ft_after.dwHighDateTime)
+ // {
+ // printf("nanoseconds: %d, rc: %d, code:0x%x. before filetime: %d, %d; after filetime: %d, %d\n",
+ // nanosecs, rc, errno,
+ // (int)ft_before.dwLowDateTime, (int)ft_before.dwHighDateTime,
+ // (int)ft_after.dwLowDateTime, (int)ft_after.dwHighDateTime);
+ // printf("time must advance during sem_timedwait.");
+ // return 1;
+ // }
+ return rc;
+}
+
+#elif defined(_TD_DARWIN_64)
+
+/*
+ * darwin implementation
+ */
+
+#include
+
+// #define SEM_USE_PTHREAD
+// #define SEM_USE_POSIX
+// #define SEM_USE_SEM
+
+// #ifdef SEM_USE_SEM
+// #include
+// #include
+// #include
+// #include
+
+// static TdThread sem_thread;
+// static TdThreadOnce sem_once;
+// static task_t sem_port;
+// static volatile int sem_inited = 0;
+// static semaphore_t sem_exit;
+
+// static void *sem_thread_routine(void *arg) {
+// (void)arg;
+// setThreadName("sem_thrd");
+
+// sem_port = mach_task_self();
+// kern_return_t ret = semaphore_create(sem_port, &sem_exit, SYNC_POLICY_FIFO, 0);
+// if (ret != KERN_SUCCESS) {
+// fprintf(stderr, "==%s[%d]%s()==failed to create sem_exit\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__);
+// sem_inited = -1;
+// return NULL;
+// }
+// sem_inited = 1;
+// semaphore_wait(sem_exit);
+// return NULL;
+// }
+
+// static void once_init(void) {
+// int r = 0;
+// r = taosThreadCreate(&sem_thread, NULL, sem_thread_routine, NULL);
+// if (r) {
+// fprintf(stderr, "==%s[%d]%s()==failed to create thread\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__);
+// return;
+// }
+// while (sem_inited == 0) {
+// ;
+// }
+// }
+// #endif
+
+// struct tsem_s {
+// #ifdef SEM_USE_PTHREAD
+// TdThreadMutex lock;
+// TdThreadCond cond;
+// volatile int64_t val;
+// #elif defined(SEM_USE_POSIX)
+// size_t id;
+// sem_t *sem;
+// #elif defined(SEM_USE_SEM)
+// semaphore_t sem;
+// #else // SEM_USE_PTHREAD
+// dispatch_semaphore_t sem;
+// #endif // SEM_USE_PTHREAD
+
+// volatile unsigned int valid : 1;
+// };
+
+// int tsem_init(tsem_t *sem, int pshared, unsigned int value) {
+// // fprintf(stderr, "==%s[%d]%s():[%p]==creating\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
+// if (*sem) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==already initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// sem);
+// abort();
+// }
+// struct tsem_s *p = (struct tsem_s *)taosMemoryCalloc(1, sizeof(*p));
+// if (!p) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==out of memory\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
+// abort();
+// }
+
+// #ifdef SEM_USE_PTHREAD
+// int r = taosThreadMutexInit(&p->lock, NULL);
+// do {
+// if (r) break;
+// r = taosThreadCondInit(&p->cond, NULL);
+// if (r) {
+// taosThreadMutexDestroy(&p->lock);
+// break;
+// }
+// p->val = value;
+// } while (0);
+// if (r) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==not created\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
+// abort();
+// }
+// #elif defined(SEM_USE_POSIX)
+// static size_t tick = 0;
+// do {
+// size_t id = atomic_add_fetch_64(&tick, 1);
+// if (id == SEM_VALUE_MAX) {
+// atomic_store_64(&tick, 0);
+// id = 0;
+// }
+// char name[NAME_MAX - 4];
+// snprintf(name, sizeof(name), "/t" PRId64, id);
+// p->sem = sem_open(name, O_CREAT | O_EXCL, pshared, value);
+// p->id = id;
+// if (p->sem != SEM_FAILED) break;
+// int e = errno;
+// if (e == EEXIST) continue;
+// if (e == EINTR) continue;
+// fprintf(stderr, "==%s[%d]%s():[%p]==not created[%d]%s\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// sem,
+// e, strerror(e));
+// abort();
+// } while (p->sem == SEM_FAILED);
+// #elif defined(SEM_USE_SEM)
+// taosThreadOnce(&sem_once, once_init);
+// if (sem_inited != 1) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==internal resource init failed\n", taosDirEntryBaseName(__FILE__), __LINE__,
+// __func__, sem);
+// errno = ENOMEM;
+// return -1;
+// }
+// kern_return_t ret = semaphore_create(sem_port, &p->sem, SYNC_POLICY_FIFO, value);
+// if (ret != KERN_SUCCESS) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==semophore_create failed\n", taosDirEntryBaseName(__FILE__), __LINE__,
+// __func__,
+// sem);
+// // we fail-fast here, because we have less-doc about semaphore_create for the moment
+// abort();
+// }
+// #else // SEM_USE_PTHREAD
+// p->sem = dispatch_semaphore_create(value);
+// if (p->sem == NULL) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==not created\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
+// abort();
+// }
+// #endif // SEM_USE_PTHREAD
+
+// p->valid = 1;
+
+// *sem = p;
+
+// return 0;
+// }
+
+// int tsem_wait(tsem_t *sem) {
+// if (!*sem) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
+// abort();
+// }
+// struct tsem_s *p = *sem;
+// if (!p->valid) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// sem); abort();
+// }
+// #ifdef SEM_USE_PTHREAD
+// if (taosThreadMutexLock(&p->lock)) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// sem);
+// abort();
+// }
+// p->val -= 1;
+// if (p->val < 0) {
+// if (taosThreadCondWait(&p->cond, &p->lock)) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__,
+// __func__,
+// sem);
+// abort();
+// }
+// }
+// if (taosThreadMutexUnlock(&p->lock)) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// sem);
+// abort();
+// }
+// return 0;
+// #elif defined(SEM_USE_POSIX)
+// return sem_wait(p->sem);
+// #elif defined(SEM_USE_SEM)
+// return semaphore_wait(p->sem);
+// #else // SEM_USE_PTHREAD
+// return dispatch_semaphore_wait(p->sem, DISPATCH_TIME_FOREVER);
+// #endif // SEM_USE_PTHREAD
+// }
+
+// int tsem_post(tsem_t *sem) {
+// if (!*sem) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
+// abort();
+// }
+// struct tsem_s *p = *sem;
+// if (!p->valid) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// sem); abort();
+// }
+// #ifdef SEM_USE_PTHREAD
+// if (taosThreadMutexLock(&p->lock)) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// sem);
+// abort();
+// }
+// p->val += 1;
+// if (p->val <= 0) {
+// if (taosThreadCondSignal(&p->cond)) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__,
+// __func__,
+// sem);
+// abort();
+// }
+// }
+// if (taosThreadMutexUnlock(&p->lock)) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// sem);
+// abort();
+// }
+// return 0;
+// #elif defined(SEM_USE_POSIX)
+// return sem_post(p->sem);
+// #elif defined(SEM_USE_SEM)
+// return semaphore_signal(p->sem);
+// #else // SEM_USE_PTHREAD
+// return dispatch_semaphore_signal(p->sem);
+// #endif // SEM_USE_PTHREAD
+// }
+
+// int tsem_destroy(tsem_t *sem) {
+// // fprintf(stderr, "==%s[%d]%s():[%p]==destroying\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
+// if (!*sem) {
+// // fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// sem);
+// // abort();
+// return 0;
+// }
+// struct tsem_s *p = *sem;
+// if (!p->valid) {
+// // fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// // sem); abort();
+// return 0;
+// }
+// #ifdef SEM_USE_PTHREAD
+// if (taosThreadMutexLock(&p->lock)) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// sem);
+// abort();
+// }
+// p->valid = 0;
+// if (taosThreadCondDestroy(&p->cond)) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// sem);
+// abort();
+// }
+// if (taosThreadMutexUnlock(&p->lock)) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// sem);
+// abort();
+// }
+// if (taosThreadMutexDestroy(&p->lock)) {
+// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// sem);
+// abort();
+// }
+// #elif defined(SEM_USE_POSIX)
+// char name[NAME_MAX - 4];
+// snprintf(name, sizeof(name), "/t%" PRId64, p->id);
+// int r = sem_unlink(name);
+// if (r) {
+// int e = errno;
+// fprintf(stderr, "==%s[%d]%s():[%p]==unlink failed[%d]%s\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
+// sem,
+// e, strerror(e));
+// abort();
+// }
+// #elif defined(SEM_USE_SEM)
+// semaphore_destroy(sem_port, p->sem);
+// #else // SEM_USE_PTHREAD
+// #endif // SEM_USE_PTHREAD
+
+// p->valid = 0;
+// taosMemoryFree(p);
+
+// *sem = NULL;
+// return 0;
+// }
+
+int tsem_init(tsem_t *psem, int flags, unsigned int count) {
+ *psem = dispatch_semaphore_create(count);
+ if (*psem == NULL) return -1;
+ return 0;
+}
+
+int tsem_destroy(tsem_t *psem) {
+ return 0;
+}
+
+int tsem_post(tsem_t *psem) {
+ if (psem == NULL || *psem == NULL) return -1;
+ dispatch_semaphore_signal(*psem);
+ return 0;
+}
+
+int tsem_wait(tsem_t *psem) {
+ if (psem == NULL || *psem == NULL) return -1;
+ dispatch_semaphore_wait(*psem, DISPATCH_TIME_FOREVER);
+ return 0;
+}
+
+int tsem_timewait(tsem_t *psem, int64_t nanosecs) {
+ if (psem == NULL || *psem == NULL) return -1;
+ dispatch_semaphore_wait(*psem, dispatch_time(DISPATCH_TIME_NOW, nanosecs));  // raw nanosecs is not a dispatch_time_t; build a relative deadline
+ return 0;
+}
+
+bool taosCheckPthreadValid(TdThread thread) {
+ int32_t ret = taosThreadKill(thread, 0);
+ if (ret == ESRCH) return false;
+ if (ret == EINVAL) return false;
+ // alive
+ return true;
+}
+
+int64_t taosGetSelfPthreadId() {
+ TdThread thread = taosThreadSelf();
+ return (int64_t)thread;
+}
+
+int64_t taosGetPthreadId(TdThread thread) { return (int64_t)thread; }
+
+void taosResetPthread(TdThread *thread) { *thread = NULL; }
+
+bool taosComparePthread(TdThread first, TdThread second) { return taosThreadEqual(first, second) ? true : false; }
+
+int32_t taosGetPId() { return (int32_t)getpid(); }
+
+int32_t taosGetAppName(char *name, int32_t *len) {
+ char buf[PATH_MAX + 1];
+ buf[0] = '\0';
+ proc_name(getpid(), buf, sizeof(buf) - 1);
+ buf[PATH_MAX] = '\0';
+ size_t n = strlen(buf);
+ if (len) *len = n;
+ if (name) tstrncpy(name, buf, TSDB_APP_NAME_LEN);
+ return 0;
+}
+
+#else
+
+/*
+ * linux implementation
+ */
+
+#include <sys/syscall.h>
+#include <unistd.h>
+
+bool taosCheckPthreadValid(TdThread thread) { return thread != 0; }
+
+int64_t taosGetSelfPthreadId() {
+ static __thread int id = 0;
+ if (id != 0) return id;
+ id = syscall(SYS_gettid);
+ return id;
+}
+
+int64_t taosGetPthreadId(TdThread thread) { return (int64_t)thread; }
+void taosResetPthread(TdThread* thread) { *thread = 0; }
+bool taosComparePthread(TdThread first, TdThread second) { return first == second; }
+
+int32_t taosGetPId() {
+ static int32_t pid;
+ if (pid != 0) return pid;
+ pid = getpid();
+ return pid;
+}
+
+int32_t taosGetAppName(char* name, int32_t* len) {
+ const char* self = "/proc/self/exe";
+ char path[PATH_MAX] = {0};
+
+ if (readlink(self, path, PATH_MAX) <= 0) {
+ return -1;
+ }
+
+ path[PATH_MAX - 1] = 0;
+ char* end = strrchr(path, '/');
+ if (end == NULL) {
+ return -1;
+ }
+
+ ++end;
+
+ tstrncpy(name, end, TSDB_APP_NAME_LEN);
+
+ if (len != NULL) {
+ *len = strlen(name);
+ }
+
+ return 0;
+}
+
+int32_t tsem_wait(tsem_t* sem) {
+ int ret = 0;
+ do {
+ ret = sem_wait(sem);
+ } while (ret != 0 && errno == EINTR);
+ return ret;
+}
+
+int32_t tsem_timewait(tsem_t* sem, int64_t nanosecs) {
+ int ret = 0;
+ // sem_timedwait() takes an ABSOLUTE CLOCK_REALTIME deadline; {0, nanosecs} is in the past and EINVAL when nanosecs >= 1e9
+ struct timespec tv;
+ clock_gettime(CLOCK_REALTIME, &tv);
+ tv.tv_sec += (time_t)((tv.tv_nsec + nanosecs) / 1000000000LL);
+ tv.tv_nsec = (long)((tv.tv_nsec + nanosecs) % 1000000000LL);
+
+ while ((ret = sem_timedwait(sem, &tv)) == -1 && errno == EINTR) continue;
+
+ return ret;
+}
+
+#endif
diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c
index 7d7a14483a..7032f39744 100644
--- a/source/util/src/tcompare.c
+++ b/source/util/src/tcompare.c
@@ -247,6 +247,756 @@ int32_t compareJsonVal(const void *pLeft, const void *pRight) {
}
}
+int32_t compareInt8Int16(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);  // was GET_INT32_VAL: 4-byte read of a 1-byte slot (over-read, endian-unsafe)
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Int32(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Int64(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Float(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Double(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Uint8(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Uint16(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Uint32(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Uint64(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT8_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Int8(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);  // was GET_INT32_VAL: 4-byte read of a 2-byte slot (over-read, endian-unsafe)
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Int32(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Int64(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Float(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Double(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Uint8(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Uint16(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Uint32(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Uint64(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT16_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+
+int32_t compareInt32Int8(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Int16(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Int64(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Float(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Double(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint8(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint16(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint32(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint64(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Int8(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Int16(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Int32(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Float(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Double(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint8(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint16(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint32(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint64(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt8(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt16(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt32(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt64(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatDouble(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+
+ if (isnan(left) && isnan(right)) {
+ return 0;
+ }
+
+ if (isnan(left)) {
+ return -1;
+ }
+
+ if (isnan(right)) {
+ return 1;
+ }
+
+ if (FLT_EQUAL(left, right)) {
+ return 0;
+ }
+ return FLT_GREATER(left, right) ? 1 : -1;
+}
+
+int32_t compareFloatUint8(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatUint16(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatUint32(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatUint64(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt8(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt16(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt32(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt64(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleFloat(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+
+ if (isnan(left) && isnan(right)) {
+ return 0;
+ }
+
+ if (isnan(left)) {
+ return -1;
+ }
+
+ if (isnan(right)) {
+ return 1;
+ }
+
+ if (FLT_EQUAL(left, right)) {
+ return 0;
+ }
+ return FLT_GREATER(left, right) ? 1 : -1;
+}
+
+int32_t compareDoubleUint8(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleUint16(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleUint32(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleUint64(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int8(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int16(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int32(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int64(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Float(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Double(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Uint16(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Uint32(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Uint64(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int8(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int16(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int32(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int64(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Float(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Double(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Uint8(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Uint32(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Uint64(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Int8(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Int16(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Int32(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Int64(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Float(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Double(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Uint8(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Uint16(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Uint64(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Int8(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Int16(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Int32(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Int64(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Float(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Double(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Uint8(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Uint16(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Uint32(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+
int32_t compareJsonValDesc(const void *pLeft, const void *pRight) {
return compareJsonVal(pRight, pLeft);
}
diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c
index a2d65d6a54..06ebbf27fb 100644
--- a/source/util/src/tlog.c
+++ b/source/util/src/tlog.c
@@ -97,7 +97,7 @@ int32_t tqDebugFlag = 135;
int32_t fsDebugFlag = 135;
int32_t metaDebugFlag = 135;
int32_t udfDebugFlag = 135;
-int32_t smaDebugFlag = 135;
+int32_t smaDebugFlag = 131;
int32_t idxDebugFlag = 135;
int64_t dbgEmptyW = 0;
diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c
index 4d5532b9a6..2767fed937 100644
--- a/source/util/src/tpagedbuf.c
+++ b/source/util/src/tpagedbuf.c
@@ -371,7 +371,7 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
return TSDB_CODE_SUCCESS;
}
-void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId) {
+void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) {
pBuf->statis.getPages += 1;
char* availablePage = NULL;
diff --git a/source/util/test/pageBufferTest.cpp b/source/util/test/pageBufferTest.cpp
index eaf198a483..1a057c5875 100644
--- a/source/util/test/pageBufferTest.cpp
+++ b/source/util/test/pageBufferTest.cpp
@@ -18,7 +18,7 @@ void simpleTest() {
int32_t pageId = 0;
int32_t groupId = 0;
- SFilePage* pBufPage = static_cast<SFilePage*>(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast<SFilePage*>(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
ASSERT_EQ(getTotalBufSize(pBuf), 1024);
@@ -29,26 +29,26 @@ void simpleTest() {
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast<SFilePage*>(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast<SFilePage*>(getNewBufPage(pBuf, &pageId));
SFilePage* t = static_cast<SFilePage*>(getBufPage(pBuf, pageId));
ASSERT_TRUE(t == pBufPage1);
- SFilePage* pBufPage2 = static_cast<SFilePage*>(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast<SFilePage*>(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast<SFilePage*>(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage2);
- SFilePage* pBufPage3 = static_cast<SFilePage*>(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast<SFilePage*>(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast<SFilePage*>(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage3);
- SFilePage* pBufPage4 = static_cast<SFilePage*>(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast<SFilePage*>(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast<SFilePage*>(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage4);
releaseBufPage(pBuf, pBufPage2);
- SFilePage* pBufPage5 = static_cast<SFilePage*>(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage5 = static_cast<SFilePage*>(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast<SFilePage*>(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage5);
@@ -64,7 +64,7 @@ void writeDownTest() {
int32_t groupId = 0;
int32_t nx = 12345;
- SFilePage* pBufPage = static_cast<SFilePage*>(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast<SFilePage*>(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
*(int32_t*)(pBufPage->data) = nx;
@@ -73,22 +73,22 @@ void writeDownTest() {
setBufPageDirty(pBufPage, true);
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast<SFilePage*>(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast<SFilePage*>(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast<SFilePage*>(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage1);
ASSERT_TRUE(pageId == 1);
- SFilePage* pBufPage2 = static_cast<SFilePage*>(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast<SFilePage*>(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast<SFilePage*>(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage2);
ASSERT_TRUE(pageId == 2);
- SFilePage* pBufPage3 = static_cast<SFilePage*>(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast<SFilePage*>(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast<SFilePage*>(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage3);
ASSERT_TRUE(pageId == 3);
- SFilePage* pBufPage4 = static_cast<SFilePage*>(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast<SFilePage*>(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast<SFilePage*>(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage4);
ASSERT_TRUE(pageId == 4);
@@ -113,32 +113,32 @@ void recyclePageTest() {
int32_t groupId = 0;
int32_t nx = 12345;
- SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage1);
ASSERT_TRUE(pageId == 1);
- SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage2);
ASSERT_TRUE(pageId == 2);
- SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage3);
ASSERT_TRUE(pageId == 3);
- SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage4);
ASSERT_TRUE(pageId == 4);
releaseBufPage(pBuf, t4);
- SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t5 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t5 == pBufPage5);
ASSERT_TRUE(pageId == 5);
diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c
index ada2039460..f39d5e6528 100644
--- a/tests/script/api/batchprepare.c
+++ b/tests/script/api/batchprepare.c
@@ -2598,7 +2598,6 @@ void runAll(TAOS *taos) {
printf("%s Begin\n", gCaseCtrl.caseCatalog);
runCaseList(taos);
-#if 0
strcpy(gCaseCtrl.caseCatalog, "Micro DB precision Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
gCaseCtrl.precision = TIME_PRECISION_MICRO;
@@ -2654,7 +2653,6 @@ void runAll(TAOS *taos) {
gCaseCtrl.bindColNum = 6;
runCaseList(taos);
gCaseCtrl.bindColNum = 0;
-#endif
/*
strcpy(gCaseCtrl.caseCatalog, "Bind Col Type Test");
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 97295d75e0..46bae734ea 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -344,6 +344,7 @@
# --- scalar ----
./test.sh -f tsim/scalar/in.sim
./test.sh -f tsim/scalar/scalar.sim
+./test.sh -f tsim/scalar/filter.sim
# ---- alter ----
./test.sh -f tsim/alter/cached_schema_after_alter.sim
diff --git a/tests/script/tsim/scalar/filter.sim b/tests/script/tsim/scalar/filter.sim
new file mode 100644
index 0000000000..9231662278
--- /dev/null
+++ b/tests/script/tsim/scalar/filter.sim
@@ -0,0 +1,38 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ======== step1
+sql drop database if exists db1;
+sql create database db1 vgroups 3;
+sql use db1;
+sql create stable st1 (fts timestamp, fbool bool, ftiny tinyint, fsmall smallint, fint int, fbig bigint, futiny tinyint unsigned, fusmall smallint unsigned, fuint int unsigned, fubig bigint unsigned, ffloat float, fdouble double, fbin binary(10), fnchar nchar(10)) tags(tts timestamp, tbool bool, ttiny tinyint, tsmall smallint, tint int, tbig bigint, tutiny tinyint unsigned, tusmall smallint unsigned, tuint int unsigned, tubig bigint unsigned, tfloat float, tdouble double, tbin binary(10), tnchar nchar(10));
+sql create table tb1 using st1 tags('2022-07-10 16:31:00', true, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql create table tb2 using st1 tags('2022-07-10 16:32:00', false, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql create table tb3 using st1 tags('2022-07-10 16:33:00', true, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+
+sql insert into tb1 values ('2022-07-10 16:31:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql insert into tb1 values ('2022-07-10 16:31:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql insert into tb1 values ('2022-07-10 16:31:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+sql insert into tb1 values ('2022-07-10 16:31:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd');
+sql insert into tb1 values ('2022-07-10 16:31:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e');
+
+sql insert into tb2 values ('2022-07-10 16:32:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql insert into tb2 values ('2022-07-10 16:32:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql insert into tb2 values ('2022-07-10 16:32:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+sql insert into tb2 values ('2022-07-10 16:32:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd');
+sql insert into tb2 values ('2022-07-10 16:32:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e');
+
+sql insert into tb3 values ('2022-07-10 16:33:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql insert into tb3 values ('2022-07-10 16:33:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql insert into tb3 values ('2022-07-10 16:33:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+sql insert into tb3 values ('2022-07-10 16:33:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd');
+sql insert into tb3 values ('2022-07-10 16:33:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e');
+
+sql select * from st1 where (ttiny > 2 or ftiny < 5) and ftiny > 2;
+if $rows != 7 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/system-test/7-tmq/stbTagFilter-1ctb.py b/tests/system-test/7-tmq/stbTagFilter-1ctb.py
index 526ff7181e..6cb152342b 100644
--- a/tests/system-test/7-tmq/stbTagFilter-1ctb.py
+++ b/tests/system-test/7-tmq/stbTagFilter-1ctb.py
@@ -250,15 +250,14 @@ class TDTestCase:
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.tmqCase1()
- # self.tmqCase2()
+ self.tmqCase2()
self.prepareTestEnv()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
self.tmqCase1()
- # self.tmqCase2()
-
+ self.tmqCase2()
def stop(self):
tdSql.close()
diff --git a/tests/system-test/7-tmq/tmq_taosx.py b/tests/system-test/7-tmq/tmq_taosx.py
index d38e509d26..07602ec29f 100644
--- a/tests/system-test/7-tmq/tmq_taosx.py
+++ b/tests/system-test/7-tmq/tmq_taosx.py
@@ -20,15 +20,9 @@ class TDTestCase:
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
- def checkFileContent(self):
- buildPath = tdCom.getBuildPath()
- cfgPath = tdCom.getClientCfgPath()
- cmdStr = '%s/build/bin/tmq_taosx_ci -c %s'%(buildPath, cfgPath)
- tdLog.info(cmdStr)
- os.system(cmdStr)
-
- srcFile = '%s/../log/tmq_taosx_tmp.source'%(cfgPath)
- dstFile = '%s/../log/tmq_taosx_tmp.result'%(cfgPath)
+ def checkJson(self, cfgPath, name):
+ srcFile = '%s/../log/%s.source'%(cfgPath, name)
+ dstFile = '%s/../log/%s.result'%(cfgPath, name)
tdLog.info("compare file: %s, %s"%(srcFile, dstFile))
consumeFile = open(srcFile, mode='r')
@@ -43,107 +37,31 @@ class TDTestCase:
tdLog.exit("compare error: %s != %s"%src, dst)
else:
break
-
- tdSql.execute('use db_taosx')
- tdSql.query("select * from ct3 order by c1 desc")
- tdSql.checkRows(2)
- tdSql.checkData(0, 1, 51)
- tdSql.checkData(0, 4, 940)
- tdSql.checkData(1, 1, 23)
- tdSql.checkData(1, 4, None)
-
- tdSql.query("select * from st1 order by ts")
- tdSql.checkRows(8)
- tdSql.checkData(0, 1, 1)
- tdSql.checkData(1, 1, 3)
- tdSql.checkData(4, 1, 4)
- tdSql.checkData(6, 1, 23)
-
- tdSql.checkData(0, 2, 2)
- tdSql.checkData(1, 2, 4)
- tdSql.checkData(4, 2, 3)
- tdSql.checkData(6, 2, 32)
-
- tdSql.checkData(0, 3, 'a')
- tdSql.checkData(1, 3, 'b')
- tdSql.checkData(4, 3, 'hwj')
- tdSql.checkData(6, 3, 's21ds')
-
- tdSql.checkData(0, 4, None)
- tdSql.checkData(1, 4, None)
- tdSql.checkData(5, 4, 940)
- tdSql.checkData(6, 4, None)
-
- tdSql.checkData(0, 5, 1000)
- tdSql.checkData(1, 5, 2000)
- tdSql.checkData(4, 5, 1000)
- tdSql.checkData(6, 5, 5000)
-
- tdSql.checkData(0, 6, 'ttt')
- tdSql.checkData(1, 6, None)
- tdSql.checkData(4, 6, 'ttt')
- tdSql.checkData(6, 6, None)
-
- tdSql.checkData(0, 7, True)
- tdSql.checkData(1, 7, None)
- tdSql.checkData(4, 7, True)
- tdSql.checkData(6, 7, None)
-
- tdSql.checkData(0, 8, None)
- tdSql.checkData(1, 8, None)
- tdSql.checkData(4, 8, None)
- tdSql.checkData(6, 8, None)
-
- tdSql.query("select * from ct1")
- tdSql.checkRows(4)
-
- tdSql.query("select * from ct2")
- tdSql.checkRows(0)
-
- tdSql.query("select * from ct0 order by c1")
- tdSql.checkRows(2)
- tdSql.checkData(0, 3, "a")
- tdSql.checkData(1, 4, None)
-
- tdSql.query("select * from n1 order by cc3 desc")
- tdSql.checkRows(2)
- tdSql.checkData(0, 1, "eeee")
- tdSql.checkData(1, 2, 940)
-
- tdSql.query("select * from jt order by i desc")
- tdSql.checkRows(2)
- tdSql.checkData(0, 1, 11)
- tdSql.checkData(0, 2, None)
- tdSql.checkData(1, 1, 1)
- tdSql.checkData(1, 2, '{"k1":1,"k2":"hello"}')
-
- tdSql.execute('drop topic if exists topic_ctb_column')
return
- def checkFileContentSnapshot(self):
- buildPath = tdCom.getBuildPath()
- cfgPath = tdCom.getClientCfgPath()
- cmdStr = '%s/build/bin/tmq_taosx_snapshot_ci -c %s'%(buildPath, cfgPath)
- tdLog.info(cmdStr)
- os.system(cmdStr)
+ def checkDropData(self):
+ tdSql.execute('use db_taosx')
+ tdSql.query("show tables")
+ tdSql.checkRows(2)
+ tdSql.query("select * from jt order by i")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 11)
+ tdSql.checkData(0, 2, '{"k1":1,"k2":"hello"}')
+ tdSql.checkData(1, 2, None)
- srcFile = '%s/../log/tmq_taosx_tmp_snapshot.source'%(cfgPath)
- dstFile = '%s/../log/tmq_taosx_tmp_snapshot.result'%(cfgPath)
- tdLog.info("compare file: %s, %s"%(srcFile, dstFile))
-
- consumeFile = open(srcFile, mode='r')
- queryFile = open(dstFile, mode='r')
-
- while True:
- dst = queryFile.readline()
- src = consumeFile.readline()
-
- if dst:
- if dst != src:
- tdLog.exit("compare error: %s != %s"%src, dst)
- else:
- break
+ tdSql.execute('use abc1')
+ tdSql.query("show tables")
+ tdSql.checkRows(2)
+ tdSql.query("select * from jt order by i")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 11)
+ tdSql.checkData(0, 2, '{"k1":1,"k2":"hello"}')
+ tdSql.checkData(1, 2, None)
+ return
+ def checkData(self):
tdSql.execute('use db_taosx')
tdSql.query("select * from ct3 order by c1 desc")
tdSql.checkRows(2)
@@ -216,13 +134,82 @@ class TDTestCase:
tdSql.checkData(0, 2, None)
tdSql.checkData(1, 1, 1)
tdSql.checkData(1, 2, '{"k1":1,"k2":"hello"}')
+ return
+
+ def checkWal1Vgroup(self):
+ buildPath = tdCom.getBuildPath()
+ cfgPath = tdCom.getClientCfgPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -c %s -sv 1 -dv 1'%(buildPath, cfgPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkJson(cfgPath, "tmq_taosx_tmp")
+ self.checkData()
+
+ return
+
+ def checkWalMultiVgroups(self):
+ buildPath = tdCom.getBuildPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -sv 3 -dv 5'%(buildPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkData()
+
+ return
+
+ def checkWalMultiVgroupsWithDropTable(self):
+ buildPath = tdCom.getBuildPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -sv 3 -dv 5 -d'%(buildPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkDropData()
+
+ return
+
+ def checkSnapshot1Vgroup(self):
+ buildPath = tdCom.getBuildPath()
+ cfgPath = tdCom.getClientCfgPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -c %s -sv 1 -dv 1 -s'%(buildPath, cfgPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkJson(cfgPath, "tmq_taosx_tmp_snapshot")
+ self.checkData()
+
+ return
+
+ def checkSnapshotMultiVgroups(self):
+ buildPath = tdCom.getBuildPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -sv 2 -dv 4 -s'%(buildPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkData()
+
+ return
+
+ def checkSnapshotMultiVgroupsWithDropTable(self):
+ buildPath = tdCom.getBuildPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -sv 2 -dv 4 -s -d'%(buildPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkDropData()
return
def run(self):
tdSql.prepare()
- self.checkFileContent()
- self.checkFileContentSnapshot()
+ self.checkWal1Vgroup()
+ self.checkSnapshot1Vgroup()
+
+ self.checkWalMultiVgroups()
+ self.checkSnapshotMultiVgroups()
+
+ self.checkWalMultiVgroupsWithDropTable()
+ self.checkSnapshotMultiVgroupsWithDropTable()
def stop(self):
tdSql.close()
diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt
index 0fb80e69c2..31331b5265 100644
--- a/tests/test/c/CMakeLists.txt
+++ b/tests/test/c/CMakeLists.txt
@@ -2,7 +2,6 @@ add_executable(tmq_demo tmqDemo.c)
add_executable(tmq_sim tmqSim.c)
add_executable(create_table createTable.c)
add_executable(tmq_taosx_ci tmq_taosx_ci.c)
-add_executable(tmq_taosx_snapshot_ci tmq_taosx_snapshot_ci.c)
add_executable(sml_test sml_test.c)
target_link_libraries(
create_table
@@ -32,13 +31,6 @@ target_link_libraries(
PUBLIC common
PUBLIC os
)
-target_link_libraries(
- tmq_taosx_snapshot_ci
- PUBLIC taos_static
- PUBLIC util
- PUBLIC common
- PUBLIC os
-)
target_link_libraries(
sml_test
diff --git a/tests/test/c/tmq_taosx_ci.c b/tests/test/c/tmq_taosx_ci.c
index ee5af03f05..2afa05b012 100644
--- a/tests/test/c/tmq_taosx_ci.c
+++ b/tests/test/c/tmq_taosx_ci.c
@@ -22,8 +22,16 @@
#include "types.h"
static int running = 1;
-TdFilePtr g_fp = NULL;
-char dir[64]={0};
+TdFilePtr g_fp = NULL;
+typedef struct{
+ bool snapShot;
+ bool dropTable;
+ int srcVgroups;
+ int dstVgroups;
+ char dir[64];
+}Config;
+
+Config g_conf = {0};
static TAOS* use_db(){
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
@@ -41,7 +49,6 @@ static TAOS* use_db(){
}
static void msg_process(TAOS_RES* msg) {
- /*memset(buf, 0, 1024);*/
printf("-----------topic-------------: %s\n", tmq_get_topic_name(msg));
printf("db: %s\n", tmq_get_db_name(msg));
printf("vg: %d\n", tmq_get_vgroup_id(msg));
@@ -51,8 +58,11 @@ static void msg_process(TAOS_RES* msg) {
if (result) {
printf("meta result: %s\n", result);
}
- taosFprintfFile(g_fp, result);
- taosFprintfFile(g_fp, "\n");
+ if(g_fp){
+ taosFprintfFile(g_fp, result);
+ taosFprintfFile(g_fp, "\n");
+ }
+
tmq_free_json_meta(result);
}
@@ -61,22 +71,10 @@ static void msg_process(TAOS_RES* msg) {
int32_t ret = tmq_write_raw(pConn, raw);
printf("write raw data: %s\n", tmq_err2str(ret));
-// else{
-// while(1){
-// int numOfRows = 0;
-// void *pData = NULL;
-// taos_fetch_raw_block(msg, &numOfRows, &pData);
-// if(numOfRows == 0) break;
-// printf("write data: tbname:%s, numOfRows:%d\n", tmq_get_table_name(msg), numOfRows);
-// int ret = taos_write_raw_block(pConn, numOfRows, pData, tmq_get_table_name(msg));
-// printf("write raw data: %s\n", tmq_err2str(ret));
-// }
-// }
-
taos_close(pConn);
}
-int32_t init_env() {
+int32_t init_env(Config *conf) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
return -1;
@@ -89,13 +87,22 @@ int32_t init_env() {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "create database if not exists db_taosx vgroups 1");
+ char sql[128] = {0};
+ snprintf(sql, 128, "create database if not exists db_taosx vgroups %d", conf->dstVgroups);
+ pRes = taos_query(pConn, sql);
if (taos_errno(pRes) != 0) {
printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
+ pRes = taos_query(pConn, "drop topic if exists topic_db");
+ if (taos_errno(pRes) != 0) {
+ printf("error in drop topic, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
pRes = taos_query(pConn, "drop database if exists abc1");
if (taos_errno(pRes) != 0) {
printf("error in drop db, reason:%s\n", taos_errstr(pRes));
@@ -103,7 +110,8 @@ int32_t init_env() {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
+ snprintf(sql, 128, "create database if not exists abc1 vgroups %d", conf->srcVgroups);
+ pRes = taos_query(pConn, sql);
if (taos_errno(pRes) != 0) {
printf("error in create db, reason:%s\n", taos_errstr(pRes));
return -1;
@@ -133,7 +141,7 @@ int32_t init_env() {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "insert into ct0 values(1626006833600, 1, 2, 'a')");
+ pRes = taos_query(pConn, "insert into ct0 values(1626006833400, 1, 2, 'a')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
return -1;
@@ -168,7 +176,7 @@ int32_t init_env() {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833602, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
+ pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833603, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
return -1;
@@ -224,6 +232,22 @@ int32_t init_env() {
}
taos_free_result(pRes);
+ if(conf->dropTable){
+ pRes = taos_query(pConn, "drop table ct3 ct1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "drop table st1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+ }
+
pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))");
if (taos_errno(pRes) != 0) {
printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes));
@@ -273,6 +297,15 @@ int32_t init_env() {
}
taos_free_result(pRes);
+ if(conf->dropTable){
+ pRes = taos_query(pConn, "drop table n1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+ }
+
pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)");
if (taos_errno(pRes) != 0) {
printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
@@ -308,6 +341,23 @@ int32_t init_env() {
}
taos_free_result(pRes);
+ if(conf->dropTable){
+ pRes = taos_query(pConn,
+ "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
+ "nchar(8), t4 bool)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "drop table st1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+ }
taos_close(pConn);
return 0;
}
@@ -327,9 +377,9 @@ int32_t create_topic() {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
+ pRes = taos_query(pConn, "create topic topic_db with meta as database abc1");
if (taos_errno(pRes) != 0) {
- printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
+ printf("failed to create topic topic_db, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
@@ -342,18 +392,7 @@ void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
printf("commit %d tmq %p param %p\n", code, tmq, param);
}
-tmq_t* build_consumer() {
-#if 0
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- assert(pConn != NULL);
-
- TAOS_RES* pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- }
- taos_free_result(pRes);
-#endif
-
+tmq_t* build_consumer(Config *config) {
tmq_conf_t* conf = tmq_conf_new();
tmq_conf_set(conf, "group.id", "tg2");
tmq_conf_set(conf, "client.id", "my app 1");
@@ -363,7 +402,9 @@ tmq_t* build_consumer() {
tmq_conf_set(conf, "enable.auto.commit", "true");
tmq_conf_set(conf, "enable.heartbeat.background", "true");
- /*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/
+ if(config->snapShot){
+ tmq_conf_set(conf, "experimental.snapshot.enable", "true");
+ }
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
@@ -374,8 +415,7 @@ tmq_t* build_consumer() {
tmq_list_t* build_topic_list() {
tmq_list_t* topic_list = tmq_list_new();
- tmq_list_append(topic_list, "topic_ctb_column");
- /*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/
+ tmq_list_append(topic_list, "topic_db");
return topic_list;
}
@@ -393,12 +433,7 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
if (tmqmessage) {
cnt++;
msg_process(tmqmessage);
- /*if (cnt >= 2) break;*/
- /*printf("get data\n");*/
taos_free_result(tmqmessage);
- /*} else {*/
- /*break;*/
- /*tmq_commit_sync(tmq, NULL);*/
}else{
break;
}
@@ -411,52 +446,18 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
fprintf(stderr, "%% Consumer closed\n");
}
-void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
- static const int MIN_COMMIT_COUNT = 1;
-
- int msg_count = 0;
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
- return;
- }
-
- tmq_list_t* subList = NULL;
- tmq_subscription(tmq, &subList);
- char** subTopics = tmq_list_to_c_array(subList);
- int32_t sz = tmq_list_get_size(subList);
- printf("subscribed topics: ");
- for (int32_t i = 0; i < sz; i++) {
- printf("%s, ", subTopics[i]);
- }
- printf("\n");
- tmq_list_destroy(subList);
-
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
- if (tmqmessage) {
- msg_process(tmqmessage);
- taos_free_result(tmqmessage);
-
- /*tmq_commit_sync(tmq, NULL);*/
- /*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/
- }
- }
-
- code = tmq_consumer_close(tmq);
- if (code)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
-void initLogFile() {
+void initLogFile(Config *conf) {
char f1[256] = {0};
char f2[256] = {0};
- sprintf(f1, "%s/../log/tmq_taosx_tmp.source", dir);
- sprintf(f2, "%s/../log/tmq_taosx_tmp.result", dir);
+ if(conf->snapShot){
+ sprintf(f1, "%s/../log/tmq_taosx_tmp_snapshot.source", conf->dir);
+ sprintf(f2, "%s/../log/tmq_taosx_tmp_snapshot.result", conf->dir);
+ }else{
+ sprintf(f1, "%s/../log/tmq_taosx_tmp.source", conf->dir);
+ sprintf(f2, "%s/../log/tmq_taosx_tmp.result", conf->dir);
+ }
+
TdFilePtr pFile = taosOpenFile(f1, TD_FILE_TEXT | TD_FILE_TRUNC | TD_FILE_STREAM);
if (NULL == pFile) {
fprintf(stderr, "Failed to open %s for save result\n", f1);
@@ -469,53 +470,82 @@ void initLogFile() {
fprintf(stderr, "Failed to open %s for save result\n", f2);
exit(-1);
}
- char *result[] = {
- "{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":16}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1}]}",
- "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}",
- "{\"type\":\"create\",\"tableName\":\"ct1\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":2000}]}",
- "{\"type\":\"create\",\"tableName\":\"ct2\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[]}",
- "{\"type\":\"create\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":3000}]}",
- "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}",
- "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":7,\"colName\":\"c3\",\"colType\":8,\"colLength\":64}",
- "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":1,\"colName\":\"t2\",\"colType\":8,\"colLength\":64}",
- "{\"type\":\"alter\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"alterType\":4,\"colName\":\"t1\",\"colValue\":\"5000\",\"colValueNull\":false}",
- "{\"type\":\"create\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":10,\"length\":4}],\"tags\":[]}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":5,\"colName\":\"c3\",\"colType\":5}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":7,\"colName\":\"c2\",\"colType\":10,\"colLength\":8}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":10,\"colName\":\"c3\",\"colNewName\":\"cc3\"}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":9}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":6,\"colName\":\"c1\"}",
- "{\"type\":\"create\",\"tableName\":\"jt\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"i\",\"type\":4}],\"tags\":[{\"name\":\"t\",\"type\":15}]}",
- "{\"type\":\"create\",\"tableName\":\"jt1\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[{\"name\":\"t\",\"type\":15,\"value\":\"{\\\"k1\\\":1,\\\"k2\\\":\\\"hello\\\"}\"}]}",
- "{\"type\":\"create\",\"tableName\":\"jt2\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[]}"
- };
- for(int i = 0; i < sizeof(result)/sizeof(result[0]); i++){
- taosFprintfFile(pFile2, result[i]);
- taosFprintfFile(pFile2, "\n");
+ if(conf->snapShot){
+ char *result[] = {
+ "{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":64},{\"name\":\"c4\",\"type\":5}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1},{\"name\":\"t2\",\"type\":8,\"length\":64}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct1\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":2000}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct2\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[]}",
+ "{\"type\":\"create\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":5000}]}",
+ "{\"type\":\"create\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c2\",\"type\":10,\"length\":8},{\"name\":\"cc3\",\"type\":5}],\"tags\":[]}",
+ "{\"type\":\"create\",\"tableName\":\"jt\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"i\",\"type\":4}],\"tags\":[{\"name\":\"t\",\"type\":15}]}",
+ "{\"type\":\"create\",\"tableName\":\"jt1\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[{\"name\":\"t\",\"type\":15,\"value\":\"{\\\"k1\\\":1,\\\"k2\\\":\\\"hello\\\"}\"}]}",
+ "{\"type\":\"create\",\"tableName\":\"jt2\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[]}",
+ };
+
+ for(int i = 0; i < sizeof(result)/sizeof(result[0]); i++){
+ taosFprintfFile(pFile2, result[i]);
+ taosFprintfFile(pFile2, "\n");
+ }
+ }else{
+ char *result[] = {
+ "{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":16}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct1\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":2000}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct2\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[]}",
+ "{\"type\":\"create\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":3000}]}",
+ "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}",
+ "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":7,\"colName\":\"c3\",\"colType\":8,\"colLength\":64}",
+ "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":1,\"colName\":\"t2\",\"colType\":8,\"colLength\":64}",
+ "{\"type\":\"alter\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"alterType\":4,\"colName\":\"t1\",\"colValue\":\"5000\",\"colValueNull\":false}",
+ "{\"type\":\"create\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":10,\"length\":4}],\"tags\":[]}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":5,\"colName\":\"c3\",\"colType\":5}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":7,\"colName\":\"c2\",\"colType\":10,\"colLength\":8}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":10,\"colName\":\"c3\",\"colNewName\":\"cc3\"}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":9}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":6,\"colName\":\"c1\"}",
+ "{\"type\":\"create\",\"tableName\":\"jt\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"i\",\"type\":4}],\"tags\":[{\"name\":\"t\",\"type\":15}]}",
+ "{\"type\":\"create\",\"tableName\":\"jt1\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[{\"name\":\"t\",\"type\":15,\"value\":\"{\\\"k1\\\":1,\\\"k2\\\":\\\"hello\\\"}\"}]}",
+ "{\"type\":\"create\",\"tableName\":\"jt2\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[]}"
+ };
+
+ for(int i = 0; i < sizeof(result)/sizeof(result[0]); i++){
+ taosFprintfFile(pFile2, result[i]);
+ taosFprintfFile(pFile2, "\n");
+ }
}
+
taosCloseFile(&pFile2);
}
int main(int argc, char* argv[]) {
- if(argc == 3 && strcmp(argv[1], "-c") == 0) {
- strcpy(dir, argv[2]);
- }else{
-// strcpy(dir, "../../../sim/psim/cfg");
- strcpy(dir, "/var/log");
+ for (int32_t i = 1; i < argc; i++) {
+ if(strcmp(argv[i], "-c") == 0){
+ strcpy(g_conf.dir, argv[++i]);
+ }else if(strcmp(argv[i], "-s") == 0){
+ g_conf.snapShot = true;
+ }else if(strcmp(argv[i], "-d") == 0){
+ g_conf.dropTable = true;
+ }else if(strcmp(argv[i], "-sv") == 0){
+ g_conf.srcVgroups = atol(argv[++i]);
+ }else if(strcmp(argv[i], "-dv") == 0){
+ g_conf.dstVgroups = atol(argv[++i]);
+ }
}
printf("env init\n");
- initLogFile();
+ if(strlen(g_conf.dir) != 0){
+ initLogFile(&g_conf);
+ }
- if (init_env() < 0) {
+ if (init_env(&g_conf) < 0) {
return -1;
}
create_topic();
- tmq_t* tmq = build_consumer();
+ tmq_t* tmq = build_consumer(&g_conf);
tmq_list_t* topic_list = build_topic_list();
basic_consume_loop(tmq, topic_list);
- /*sync_consume_loop(tmq, topic_list);*/
taosCloseFile(&g_fp);
}
diff --git a/tests/test/c/tmq_taosx_snapshot_ci.c b/tests/test/c/tmq_taosx_snapshot_ci.c
deleted file mode 100644
index e3a52f7cad..0000000000
--- a/tests/test/c/tmq_taosx_snapshot_ci.c
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#include
-#include
-#include
-#include
-#include
-#include "taos.h"
-#include "types.h"
-
-static int running = 1;
-TdFilePtr g_fp = NULL;
-char dir[64]={0};
-
-static TAOS* use_db(){
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return NULL;
- }
-
- TAOS_RES* pRes = taos_query(pConn, "use db_taosx");
- if (taos_errno(pRes) != 0) {
- printf("error in use db_taosx, reason:%s\n", taos_errstr(pRes));
- return NULL;
- }
- taos_free_result(pRes);
- return pConn;
-}
-
-static void msg_process(TAOS_RES* msg) {
- /*memset(buf, 0, 1024);*/
- printf("-----------topic-------------: %s\n", tmq_get_topic_name(msg));
- printf("db: %s\n", tmq_get_db_name(msg));
- printf("vg: %d\n", tmq_get_vgroup_id(msg));
- TAOS *pConn = use_db();
- if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
- char* result = tmq_get_json_meta(msg);
- if (result) {
- printf("meta result: %s\n", result);
- }
- taosFprintfFile(g_fp, result);
- taosFprintfFile(g_fp, "\n");
- tmq_free_json_meta(result);
- }
-
- tmq_raw_data raw = {0};
- tmq_get_raw(msg, &raw);
- int32_t ret = tmq_write_raw(pConn, raw);
- printf("write raw data: %s\n", tmq_err2str(ret));
-
-// else{
-// while(1){
-// int numOfRows = 0;
-// void *pData = NULL;
-// taos_fetch_raw_block(msg, &numOfRows, &pData);
-// if(numOfRows == 0) break;
-// printf("write data: tbname:%s, numOfRows:%d\n", tmq_get_table_name(msg), numOfRows);
-// int ret = taos_write_raw_block(pConn, numOfRows, pData, tmq_get_table_name(msg));
-// printf("write raw data: %s\n", tmq_err2str(ret));
-// }
-// }
-
- taos_close(pConn);
-}
-
-int32_t init_env() {
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return -1;
- }
-
- TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx");
- if (taos_errno(pRes) != 0) {
- printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create database if not exists db_taosx vgroups 1");
- if (taos_errno(pRes) != 0) {
- printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop database if exists abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in drop db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
- if (taos_errno(pRes) != 0) {
- printf("error in create db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn,
- "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
- "nchar(8), t4 bool)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000, \"ttt\", true)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct0 values(1626006833600, 1, 2, 'a')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct1 using st1(t1) tags(2000)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct2 using st1(t1) tags(NULL)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct2, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct1 values(1626006833600, 3, 4, 'b')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct3 using st1(t1) tags(3000)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833602, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 add column c4 bigint");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 modify column c3 binary(64)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 values(1626006833605, 53, 63, 'cffffffffffffffffffffffffffff', 8989898899999) (1626006833609, 51, 62, 'c333', 940)");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 select * from ct1");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 add tag t2 binary(64)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table ct3 set tag t1=5000");
- if (taos_errno(pRes) != 0) {
- printf("failed to slter child table ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "delete from abc1 .ct3 where ts < 1626006833606");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))");
- if (taos_errno(pRes) != 0) {
- printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 add column c3 bigint");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 modify column c2 nchar(8)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 rename column c3 cc3");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 comment 'hello'");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 drop column c1");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into n1 values(now, 'eeee', 8989898899999) (now+9s, 'c333', 940)");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt1 using jt tags('{\"k1\":1, \"k2\":\"hello\"}')");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt2 using jt tags('')");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt2, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into jt1 values(now, 1)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into jt2 values(now, 11)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt2, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- taos_close(pConn);
- return 0;
-}
-
-int32_t create_topic() {
- printf("create topic\n");
- TAOS_RES* pRes;
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return -1;
- }
-
- pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
- if (taos_errno(pRes) != 0) {
- printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- taos_close(pConn);
- return 0;
-}
-
-void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
- printf("commit %d tmq %p param %p\n", code, tmq, param);
-}
-
-tmq_t* build_consumer() {
-#if 0
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- assert(pConn != NULL);
-
- TAOS_RES* pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- }
- taos_free_result(pRes);
-#endif
-
- tmq_conf_t* conf = tmq_conf_new();
- tmq_conf_set(conf, "group.id", "tg2");
- tmq_conf_set(conf, "client.id", "my app 1");
- tmq_conf_set(conf, "td.connect.user", "root");
- tmq_conf_set(conf, "td.connect.pass", "taosdata");
- tmq_conf_set(conf, "msg.with.table.name", "true");
- tmq_conf_set(conf, "enable.auto.commit", "true");
- tmq_conf_set(conf, "enable.heartbeat.background", "true");
- tmq_conf_set(conf, "experimental.snapshot.enable", "true");
- /*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/
-
- tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
- tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
- assert(tmq);
- tmq_conf_destroy(conf);
- return tmq;
-}
-
-tmq_list_t* build_topic_list() {
- tmq_list_t* topic_list = tmq_list_new();
- tmq_list_append(topic_list, "topic_ctb_column");
- /*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/
- return topic_list;
-}
-
-void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
- printf("subscribe err\n");
- return;
- }
- int32_t cnt = 0;
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
- if (tmqmessage) {
- cnt++;
- msg_process(tmqmessage);
- /*if (cnt >= 2) break;*/
- /*printf("get data\n");*/
- taos_free_result(tmqmessage);
- /*} else {*/
- /*break;*/
- /*tmq_commit_sync(tmq, NULL);*/
- }else{
- break;
- }
- }
-
- code = tmq_consumer_close(tmq);
- if (code)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
-void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
- static const int MIN_COMMIT_COUNT = 1;
-
- int msg_count = 0;
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
- return;
- }
-
- tmq_list_t* subList = NULL;
- tmq_subscription(tmq, &subList);
- char** subTopics = tmq_list_to_c_array(subList);
- int32_t sz = tmq_list_get_size(subList);
- printf("subscribed topics: ");
- for (int32_t i = 0; i < sz; i++) {
- printf("%s, ", subTopics[i]);
- }
- printf("\n");
- tmq_list_destroy(subList);
-
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
- if (tmqmessage) {
- msg_process(tmqmessage);
- taos_free_result(tmqmessage);
-
- /*tmq_commit_sync(tmq, NULL);*/
- /*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/
- }
- }
-
- code = tmq_consumer_close(tmq);
- if (code)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
-void initLogFile() {
- char f1[256] = {0};
- char f2[256] = {0};
-
- sprintf(f1, "%s/../log/tmq_taosx_tmp_snapshot.source", dir);
- sprintf(f2, "%s/../log/tmq_taosx_tmp_snapshot.result", dir);
- TdFilePtr pFile = taosOpenFile(f1, TD_FILE_TEXT | TD_FILE_TRUNC | TD_FILE_STREAM);
- if (NULL == pFile) {
- fprintf(stderr, "Failed to open %s for save result\n", f1);
- exit(-1);
- }
- g_fp = pFile;
-
- TdFilePtr pFile2 = taosOpenFile(f2, TD_FILE_TEXT | TD_FILE_TRUNC | TD_FILE_STREAM);
- if (NULL == pFile2) {
- fprintf(stderr, "Failed to open %s for save result\n", f2);
- exit(-1);
- }
- char *result[] = {
- "{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":64},{\"name\":\"c4\",\"type\":5}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1},{\"name\":\"t2\",\"type\":8,\"length\":64}]}",
- "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}",
- "{\"type\":\"create\",\"tableName\":\"ct1\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":2000}]}",
- "{\"type\":\"create\",\"tableName\":\"ct2\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[]}",
- "{\"type\":\"create\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":5000}]}",
- "{\"type\":\"create\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c2\",\"type\":10,\"length\":8},{\"name\":\"cc3\",\"type\":5}],\"tags\":[]}",
- "{\"type\":\"create\",\"tableName\":\"jt\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"i\",\"type\":4}],\"tags\":[{\"name\":\"t\",\"type\":15}]}",
- "{\"type\":\"create\",\"tableName\":\"jt1\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[{\"name\":\"t\",\"type\":15,\"value\":\"{\\\"k1\\\":1,\\\"k2\\\":\\\"hello\\\"}\"}]}",
- "{\"type\":\"create\",\"tableName\":\"jt2\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[]}",
- };
-
- for(int i = 0; i < sizeof(result)/sizeof(result[0]); i++){
- taosFprintfFile(pFile2, result[i]);
- taosFprintfFile(pFile2, "\n");
- }
- taosCloseFile(&pFile2);
-}
-
-int main(int argc, char* argv[]) {
- if(argc == 3 && strcmp(argv[1], "-c") == 0) {
- strcpy(dir, argv[2]);
- }else{
-// strcpy(dir, "../../../sim/psim/cfg");
- strcpy(dir, "/var/log");
- }
-
- printf("env init\n");
- initLogFile();
-
- if (init_env() < 0) {
- return -1;
- }
- create_topic();
-
- tmq_t* tmq = build_consumer();
- tmq_list_t* topic_list = build_topic_list();
- basic_consume_loop(tmq, topic_list);
- /*sync_consume_loop(tmq, topic_list);*/
- taosCloseFile(&g_fp);
-}
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index d27fb99ef7..03097e31b9 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -82,6 +82,7 @@ ELSE ()
COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
)
EXECUTE_PROCESS(
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
COMMAND git rev-parse --short HEAD
RESULT_VARIABLE commit_sha1
OUTPUT_VARIABLE taosadapter_commit_sha1
@@ -118,8 +119,8 @@ ELSE ()
PATCH_COMMAND
COMMAND git clean -f -d
BUILD_COMMAND
- COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
- COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
COMMAND ${_upx_prefix}/src/upx/upx taosadapter
COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
@@ -141,8 +142,8 @@ ELSE ()
PATCH_COMMAND
COMMAND git clean -f -d
BUILD_COMMAND
- COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
- COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
@@ -174,8 +175,8 @@ ELSE ()
BUILD_COMMAND
COMMAND set CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client
COMMAND set CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib
- COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
- COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
COMMAND ${_upx_prefix}/src/upx/upx taosadapter.exe
COMMAND cmake -E copy taosadapter.exe ${CMAKE_BINARY_DIR}/build/bin