Merge remote-tracking branch 'origin/3.0' into enh/3.0_planner_optimize

@@ -46,7 +46,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin

MESSAGE("Current system processor is ${CMAKE_SYSTEM_PROCESSOR}.")
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64")
-    MESSAGE("Current system arch is arm64")
+    MESSAGE("Current system arch is 64")
    SET(TD_DARWIN_64 TRUE)
    ADD_DEFINITIONS("-D_TD_DARWIN_64")
ENDIF ()
@@ -2,7 +2,7 @@

# taos-tools
ExternalProject_Add(taos-tools
    GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-    GIT_TAG e8bfca6
+    GIT_TAG 9cb965f
    SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
    BINARY_DIR ""
    #BUILD_IN_SOURCE TRUE
@@ -4,25 +4,24 @@ sidebar_label: Documentation Home
slug: /
---

-TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.
+TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel, concepts in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.

To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.

-TDengine greatly improves the efficiency of data ingestion, querying and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [“Concepts”](./concept) thoroughly.
+TDengine greatly improves the efficiency of data ingestion, querying, and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [Concepts](./concept) thoroughly.

-If you are a developer, please read the [“Developer Guide”](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work.
+If you are a developer, please read the [Developer Guide](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work.

-We live in the era of big data, and scale-up is unable to meet the growing needs of business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to ["cluster deployment"](../deployment).
+We live in the era of big data, and scale-up is unable to meet the growing needs of the business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but it also decided to open source this important feature. To learn how to deploy, manage, and maintain a TDengine cluster, please refer to [Cluster Deployment](../deployment).

-TDengine uses ubiquitious SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll up, interpolation and time weighted average, among many others. The ["SQL Reference"](./taos-sql) chapter describes the SQL syntax in detail, and lists the various supported commands and functions.
+TDengine uses ubiquitous SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll-up, interpolation, and time-weighted average, among many others. The [SQL Reference](./taos-sql) chapter describes the SQL syntax in detail and lists the various supported commands and functions.

-If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the ["Administration"](./operation) section.
+If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to and thoroughly read the [Administration](./operation) section.

-If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the ["Reference"](./reference) chapter.
+If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the [Reference](./reference) chapter.

-If you are very interested in the internal design of TDengine, please read the chapter ["Inside TDengine”](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
+If you are very interested in the internal design of TDengine, please read the chapter [Inside TDengine](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.

-TDengine is an open source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation, or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
+TDengine is an open-source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.

-Together, we make a difference.
+Together, we make a difference!
@@ -3,7 +3,7 @@ title: Introduction
toc_max_heading_level: 2
---

-TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](/develop/cache), [stream processing](/develop/stream), [data subscription](/develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
+TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature, is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.

This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.
@@ -12,34 +12,34 @@ This section introduces the major features, competitive advantages, typical use-

The major features are listed below:

1. Insert data
-   * supports [using SQL to insert](/develop/insert-data/sql-writing).
-   * supports [schemaless writing](/reference/schemaless/) just like NoSQL databases. It also supports standard protocols like [InfluxDB LINE](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json) among others.
-   * supports seamless integration with third-party tools like [Telegraf](/third-party/telegraf/), [Prometheus](/third-party/prometheus/), [collectd](/third-party/collectd/), [StatsD](/third-party/statsd/), [TCollector](/third-party/tcollector/) and [icinga2/](/third-party/icinga2/), they can write data into TDengine with simple configuration and without a single line of code.
+   - Supports [using SQL to insert](../develop/insert-data/sql-writing).
+   - Supports [schemaless writing](../reference/schemaless/) just like NoSQL databases. It also supports standard protocols like [InfluxDB Line](../develop/insert-data/influxdb-line), [OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON](../develop/insert-data/opentsdb-json), among others.
+   - Supports seamless integration with third-party tools like [Telegraf](../third-party/telegraf/), [Prometheus](../third-party/prometheus/), [collectd](../third-party/collectd/), [StatsD](../third-party/statsd/), [TCollector](../third-party/tcollector/), [EMQX](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker), and [Icinga2](../third-party/icinga2/). These tools can write data into TDengine with simple configuration and without a single line of code.
2. Query data
-   * supports standard [SQL](/taos-sql/), including nested query.
-   * supports [time series specific functions](/taos-sql/function/#time-series-extensions) and [time series specific queries](/taos-sql/distinguished), like downsampling, interpolation, cumulated sum, time weighted average, state window, session window and many others.
-   * supports [user defined functions](/taos-sql/udf).
-3. [Caching](/develop/cache/): TDengine always saves the last data point in cache, so Redis is not needed for time-series data processing.
-4. [Stream Processing](/develop/stream/): not only is the continuous query is supported, but TDengine also supports even driven stream processing, so Flink or spark is not needed for time-series daata processing.
-5. [Data Dubscription](/develop/tmq/): application can subscribe a table or a set of tables. API is the same as Kafka, but you can specify filter conditions.
+   - Supports standard [SQL](../taos-sql/), including nested queries.
+   - Supports [time series specific functions](../taos-sql/function/#time-series-extensions) and [time series specific queries](../taos-sql/distinguished), like downsampling, interpolation, cumulated sum, time weighted average, state window, session window and many others.
+   - Supports [User Defined Functions (UDF)](../taos-sql/udf).
+3. [Caching](../develop/cache/): TDengine always saves the last data point in cache, so Redis is not needed for time-series data processing.
+4. [Stream Processing](../develop/stream/): Not only is continuous query supported, but TDengine also supports event-driven stream processing, so Flink or Spark is not needed for time-series data processing.
+5. [Data Subscription](../develop/tmq/): Applications can subscribe to a table or a set of tables. The API is the same as Kafka's, but you can specify filter conditions.
6. Visualization
-   * supports seamless integration with [Grafana](/third-party/grafana/) for visualization.
-   * supports seamless integration with Google Data Studio.
+   - Supports seamless integration with [Grafana](../third-party/grafana/) for visualization.
+   - Supports seamless integration with Google Data Studio.
7. Cluster
-   * supports [cluster](/deployment/) with the capability of increasing processing power by adding more nodes.
-   * supports [deployment on Kubernetes](/deployment/k8s/)
-   * supports high availability via data replication.
+   - Supports [cluster](../deployment/) with the capability of increasing processing power by adding more nodes.
+   - Supports [deployment on Kubernetes](../deployment/k8s/).
+   - Supports high availability via data replication.
8. Administration
-   * provides [monitoring](/operation/monitor) on running instances of TDengine.
-   * provides many ways to [import](/operation/import) and [export](/operation/export) data.
+   - Provides [monitoring](../operation/monitor) on running instances of TDengine.
+   - Provides many ways to [import](../operation/import) and [export](../operation/export) data.
9. Tools
-   * provides an interactive [command-line interface](/reference/taos-shell) for management, maintenance and ad-hoc queries.
-   * provides a tool [taosBenchmark](/reference/taosbenchmark/) for testing the performance of TDengine.
+   - Provides an interactive [Command-line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries.
+   - Provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine.
10. Programming
-    * provides [connectors](/reference/connector/) for [C/C++](/reference/connector/cpp), [Java](/reference/connector/java), [Python](/reference/connector/python), [Go](/reference/connector/go), [Rust](/reference/connector/rust), [Node.js](/reference/connector/node) and other programming languages.
-    * provides a [REST API](/reference/rest-api/).
+    - Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
+    - Provides a [REST API](../reference/rest-api/).

For more details on features, please read through the entire documentation.
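For illustration, here is a minimal sketch of the SQL-based ingestion and downsampling described above. The `meters` super table and the `d1001` subtable follow the example schema used elsewhere in these docs; treat the exact names and values as assumptions.

```sql
-- Insert two rows into subtable d1001, creating it from the super table if needed
INSERT INTO d1001 USING meters TAGS ('California.SanFrancisco', 2)
VALUES (NOW, 10.3, 219, 0.31) (NOW + 1s, 12.6, 218, 0.33);

-- Downsampling extension: 10-minute average voltage across all meters
SELECT _wstart, AVG(voltage) FROM meters INTERVAL(10m);
```
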
## Competitive Advantages

@@ -49,23 +49,31 @@ By making full use of [characteristics of time series data](https://tdengine.com

- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.

-- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
+- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment and full observability, TDengine is a cloud native Time-series Database and can be deployed on public, private or hybrid clouds.

- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.

- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.

- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including its cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.

-With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly;2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly;3: With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
+With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
+
+1. With its superior performance, the computing and storage resources are reduced significantly.
+2. With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly.
+3. With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.

## Technical Ecosystem

This is how TDengine would be situated in a typical time-series data processing platform:

<figure>

![TDengine Database Technical Ecosystem](eco_system.webp)

-<center>Figure 1. TDengine Technical Ecosystem</center>
+<center><figcaption>Figure 1. TDengine Technical Ecosystem</figcaption></center>

</figure>

On the left-hand side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right-hand side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides an interactive command-line interface and a web interface for management and maintenance.
@@ -75,42 +83,42 @@ As a high-performance, scalable and SQL supported time-series database, TDengine

### Characteristics and Requirements of Data Sources

| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------------------ | ------------------ | ----------------------- | ------------------- | :-------------- |
| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with a matching high compression ratio to achieve the best storage efficiency in the industry. |
| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |

### System Architecture Requirements

| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ------------------------------------ | ------------------ | ----------------------- | ------------------- | --------------- |
| A simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions. There is no need to integrate any additional third-party products. |
| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. |

### System Function Requirements

| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| -------------------------------- | ------------------ | ----------------------- | ------------------- | --------------- |
| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level. |
| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. |

### System Performance Requirements

| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ----------------------------------- | ------------------ | ----------------------- | ------------------- | --------------- |
| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |

### System Maintenance Requirements

| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
| ----------------------------------- | ------------------ | ----------------------- | ------------------- | --------------- |
| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries make maintenance simpler, allow reuse and reduce learning costs. |
| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine. |

## Comparison with other databases

@@ -162,7 +162,7 @@ To better understand the data model using metrics, tags, super table and subtable,

## Database

-A database is a collection of tables. TDengine allows a running instance to have multiple databases, and each database can be configured with different storage policies. The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. In order for TDengine to work with maximum efficiency in various scenarios, TDengine recommends that STables with different data characteristics be created in different databases.
+A database is a collection of tables. TDengine allows a running instance to have multiple databases, and each database can be configured with different storage policies. The [characteristics of time-series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For example, days to keep, number of replicas, data block size, whether data updates are allowed, and other configurable parameters would be determined by the characteristics of your data and your business requirements. In order for TDengine to work with maximum efficiency in various scenarios, TDengine recommends that STables with different data characteristics be created in different databases.

In a database, there can be one or more STables, but a STable belongs to only one database. All tables owned by a STable are stored in only one database.
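As a hedged illustration of the per-database storage policies described above, a database can be created with explicit retention, file-duration, replication, and vgroup settings; the database name and parameter values here are assumptions, not recommendations:

```sql
-- Keep data for one year in 10-day data files,
-- replicate it 3 times, and spread it over 4 vgroups
CREATE DATABASE power DURATION 10 KEEP 365 REPLICA 3 VGROUPS 4;
```
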

@@ -279,6 +279,6 @@ Prior to establishing connection, please make sure TDengine is already running a
</Tabs>

:::tip
-If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.taosdata.com/train-faq/faq).
+If the connection fails, in most cases it's caused by an improper FQDN configuration or firewall settings. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq).

:::

@@ -16,7 +16,7 @@ To achieve high performance writing, there are a few aspects to consider. In the

From the perspective of the application program, you need to consider:

-1. The data size of each single write, also known as batch size. Generally speaking, higher batch size generates better writing performance. However, once the batch size is over a specific value, you will not get any additional benefit anymore. When using SQL to write into TDengine, it's better to put as much as possible data in single SQL. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB. It can be configured by parameter `maxSQLLength` on client side, and the default value is 65,480.
+1. The data size of each single write, also known as batch size. Generally speaking, a higher batch size generates better writing performance. However, once the batch size is over a specific value, you will not get any additional benefit anymore. When using SQL to write into TDengine, it's better to put as much data as possible in a single SQL statement (see the sketch after this list). The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB.

2. The number of concurrent connections. Normally more connections lead to better results. However, once the number of connections exceeds the processing ability of the server side, the performance may degrade.
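A minimal sketch of such a batched write, combining multiple rows and multiple tables in one statement; the subtable names and values are assumptions for illustration:

```sql
-- One SQL statement carrying several rows for several subtables
INSERT INTO d1001 VALUES (NOW, 10.3, 219, 0.31) (NOW + 1s, 12.6, 218, 0.33)
            d1002 VALUES (NOW, 10.2, 220, 0.23) (NOW + 1s, 11.8, 221, 0.28);
```
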

@@ -46,12 +46,9 @@ If the data source is Kafka, then the application program is a consumer of Kafka,

### Tune TDengine

TDengine is a distributed and high performance time series database; there are also some ways to tune TDengine to get better writing performance.

-On the server side, database configuration parameter `vgroups` needs to be set carefully to maximize the system performance. If it's set too low, the system capability can't be utilized fully; if it's set too big, unnecessary resource competition may be produced. A normal recommendation for `vgroups` parameter is 2 times of the number of CPU cores. However, depending on the actual system resources, it may still need to tuned.
+1. Set a proper number of `vgroups` according to the available CPU cores. Normally, we recommend 2 \* number_of_cores as a starting point. If the verification result shows this is not enough to utilize CPU resources, you can use a higher value.
+2. Set proper `minTablesPerVnode`, `tableIncStepPerVnode`, and `maxVgroupsPerDb` according to the number of tables so that tables are distributed evenly across vgroups. The purpose is to balance the workload among all vnodes so that system resources can be utilized better to get higher performance.

-For more performance tuning parameters, please refer to [Configuration Parameters](../../../reference/config).
+For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config).
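For illustration, the `vgroups` recommendation above translates into a statement like the following on, say, a 4-core server; the database name is an assumption:

```sql
-- 2 * 4 CPU cores = 8 vgroups as a starting point
CREATE DATABASE sensor_db VGROUPS 8;
```
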
## Sample Programs

@@ -359,7 +356,7 @@ Writing process tries to read as much as possible data from message queue and wr

<details>

-SQLWriter class encapsulates the logic of composing SQL and writing data. Please be noted that the tables have not been created before writing, but are created automatically when catching the exception of table doesn't exist. For other exceptions caught, the SQL which caused the exception are logged for you to debug. This class also checks the SQL length, if the SQL length is closed to `maxSQLLength` the SQL will be executed immediately. To improve writing efficiency, it's better to increase `maxSQLLength` properly.
+The SQLWriter class encapsulates the logic of composing SQL and writing data. Please note that the tables have not been created before writing; they are created automatically when the "table does not exist" exception is caught. For other exceptions caught, the SQL that caused the exception is logged for you to debug. This class also checks the SQL length, and passes the maximum SQL length via the parameter maxSQLLength according to the actual TDengine limit.

<summary>SQLWriter</summary>

@@ -39,18 +39,18 @@ To get the hostname on any host, the command `hostname -f` can be executed.

On the physical machine running the application, ping the dnode that is running taosd. If the dnode is not accessible, the application cannot connect to taosd. In this case, verify the DNS and hosts settings on the physical node running the application.

-The end point of each dnode is the output hostname and port, such as h1.taosdata.com:6030.
+The end point of each dnode is the output hostname and port, such as h1.tdengine.com:6030.

### Step 5

-Modify the TDengine configuration file `/etc/taos/taos.cfg` on each node. Assuming the first dnode of TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as following.
+Modify the TDengine configuration file `/etc/taos/taos.cfg` on each node. Assuming the first dnode of the TDengine cluster is "h1.tdengine.com:6030", its `taos.cfg` is configured as follows.

```c
// firstEp is the end point to connect to when any dnode starts
-firstEp               h1.taosdata.com:6030
+firstEp               h1.tdengine.com:6030

// must be configured to the FQDN of the host where the dnode is launched
-fqdn                  h1.taosdata.com
+fqdn                  h1.tdengine.com

// the port used by the dnode, default is 6030
serverPort            6030
@ -76,13 +76,13 @@ The first dnode can be started following the instructions in [Get Started](/get-
|
|||
taos> show dnodes;
|
||||
id | endpoint | vnodes | support_vnodes | status | create_time | note |
|
||||
============================================================================================================================================
|
||||
1 | h1.taosdata.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
|
||||
1 | h1.tdengine.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
|
||||
Query OK, 1 rows affected (0.007984s)
|
||||
|
||||
|
||||
```
|
||||
|
||||
From the above output, it is shown that the end point of the started dnode is "h1.taosdata.com:6030", which is the `firstEp` of the cluster.
|
||||
From the above output, it is shown that the end point of the started dnode is "h1.tdengine.com:6030", which is the `firstEp` of the cluster.
|
||||

## Add DNODE

@@ -90,7 +90,7 @@ There are a few steps necessary to add other dnodes in the cluster.

Second, we can start `taosd` as instructed in [Get Started](/get-started/).

-Then, on the first dnode i.e. h1.taosdata.com in our example, use TDengine CLI `taos` to execute the following command:
+Then, on the first dnode, i.e. h1.tdengine.com in our example, use the TDengine CLI `taos` to execute the following command:

```sql
CREATE DNODE "h2.taos.com:6030";

@@ -98,7 +98,7 @@ CREATE DNODE "h2.taos.com:6030";

This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In the command, "fqdn:port" should be quoted using double quotes. Change `"h2.taos.com:6030"` to the end point of your new dnode.

-Then on the first dnode h1.taosdata.com, execute `show dnodes` in `taos`
+Then on the first dnode h1.tdengine.com, execute `show dnodes` in `taos`:

```sql
SHOW DNODES;

@@ -152,7 +152,7 @@ clusterDomainSuffix: ""
# converting an upper-snake-cased variable like `TAOS_DEBUG_FLAG`,
# to a camelCase taos config variable `debugFlag`.
#
-# See the variable list at https://www.taosdata.com/cn/documentation/administrator .
+# See the [Configuration Variables](../../reference/config)
#
# Note:
# 1. firstEp/secondEp: should not be set here, it's auto generated at scale-up.

@@ -1,6 +1,7 @@
---
-sidebar_label: Permissions Management
-title: Permissions Management
+sidebar_label: Access Control
+title: User and Access Control
+description: Manage users and their permissions
---

This document describes how to manage permissions in TDengine.
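As a hedged sketch of what that access control looks like in SQL, a user can be created and granted database-level privileges; the user name, password, and database name are assumptions:

```sql
CREATE USER app_user PASS 'Ab1_secret';
GRANT READ ON power.* TO app_user;
```
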

@@ -1,6 +1,6 @@
---
title: TDengine SQL
-description: "The syntax supported by TDengine SQL "
+description: 'The syntax supported by TDengine SQL'
---

This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL, this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. TDengine SQL has been enhanced in version 3.0, and the query engine has been rearchitected. For information about how TDengine SQL has changed, see [Changes in TDengine 3.0](../taos-sql/changes).

@@ -15,7 +15,7 @@ Syntax Specifications used in this chapter:
- | means one of a few options, excluding | itself.
- … means the item prior to it can be repeated multiple times.

-To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
+To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it is assumed that there is a data set from electric meters. Each meter collects 3 data measurements: current, voltage, and phase. The data model is shown below:

```
taos> DESCRIBE meters;
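-- A hedged illustration (as SQL comments, since this sits inside the open code
-- block above): the meters data set is typically created with a statement like
-- the following; the exact column and tag definitions are assumptions based on
-- the measurements named above.
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
  TAGS (location BINARY(64), groupId INT);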

@@ -1,12 +1,12 @@
---
-title: Install & Uninstall
+title: Install and Uninstall
description: Install, Uninstall, Start, Stop and Upgrade
---

import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";

-TDengine community version provides deb and rpm packages for users to choose from, based on their system environment. The deb package supports Debian, Ubuntu and derivative systems. The rpm package supports CentOS, RHEL, SUSE and derivative systems. Furthermore, a tar.gz package is provided for TDengine Enterprise customers.
+This document gives more information about installing, uninstalling, and upgrading TDengine.

## Install
@@ -56,7 +56,7 @@ Removing taostools (2.1.2) ...

Deb package of TDengine can be uninstalled as below:

-```bash
+```
$ sudo dpkg -r tdengine
(Reading database ... 137504 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...

@@ -110,109 +110,57 @@ Start to uninstall taos tools ...
taos tools is uninstalled successfully!
```

</TabItem>
<TabItem label="Windows uninstall" value="windows">
Run C:\TDengine\unins000.exe to uninstall TDengine on a Windows system.
</TabItem>
</Tabs>

-:::note
+:::info

-- We strongly recommend not to use multiple kinds of installation packages on a single host TDengine.
-- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
-
-```bash
-$ sudo rm -f /var/lib/dpkg/info/tdengine*
-```
-
-- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
-
-```bash
-$ sudo rpm -e --noscripts tdengine
-```
+- We strongly recommend not to use multiple kinds of installation packages on a single host TDengine. The packages may affect each other and cause errors.
+- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information.
+
+  ```
+  $ sudo rm -f /var/lib/dpkg/info/tdengine*
+  ```
+
+  You can then reinstall if needed.
+
+- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information.
+
+  ```
+  $ sudo rpm -e --noscripts tdengine
+  ```
+
+  You can then reinstall if needed.

:::

## Installation Directory

TDengine is installed at /usr/local/taos if successful.

```bash
$ cd /usr/local/taos
$ ll
total 28
drwxr-xr-x 7 root root 4096 Feb 22 09:34 ./
drwxr-xr-x 12 root root 4096 Feb 22 09:34 ../
drwxr-xr-x 2 root root 4096 Feb 22 09:34 bin/
drwxr-xr-x 2 root root 4096 Feb 22 09:34 cfg/
lrwxrwxrwx 1 root root 13 Feb 22 09:34 data -> /var/lib/taos/
drwxr-xr-x 2 root root 4096 Feb 22 09:34 driver/
drwxr-xr-x 10 root root 4096 Feb 22 09:34 examples/
drwxr-xr-x 2 root root 4096 Feb 22 09:34 include/
lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
```

During the installation process:

- Configuration directory, data directory, and log directory are created automatically if they don't exist
- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg
- The default data directory is /var/lib/taos, which is a soft link to /usr/local/taos/data
- The default log directory is /var/log/taos, which is a soft link to /usr/local/taos/log
- The executables at /usr/local/taos/bin are linked to /usr/bin
- The DLL files at /usr/local/taos/driver are linked to /usr/lib
- The header files at /usr/local/taos/include are linked to /usr/include

:::note
Uninstalling and Modifying Files

- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, and log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data.

- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as a configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.

## Start and Stop

Linux system services `systemd`, `systemctl` or `service` are used to start, stop and restart TDengine. The server process of TDengine is `taosd`, which is started automatically after the Linux system is started. System operators can use `systemd`, `systemctl` or `service` to start, stop or restart TDengine server.

For example, if using `systemctl`, the commands to start, stop, restart and check TDengine server are below:

- Start server: `systemctl start taosd`

- Stop server: `systemctl stop taosd`

- Restart server: `systemctl restart taosd`

- Check server status: `systemctl status taosd`

Another component, `taosAdapter`, provides HTTP service for TDengine; it should also be started and stopped using `systemctl`.

If the server process is OK, the output of `systemctl status` is like below:

```
Active: active (running)
```

Otherwise, the output is as below:

```
Active: inactive (dead)
```

## Upgrade

There are two aspects to upgrading: upgrading the installation package and upgrading a running server.

To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version.

Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections; only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:

- Stop inserting data
- Make sure all data is persisted to disk
- Make some simple queries (Such as total rows in stables, tables and so on. Note down the values. Follow best practices and relevant SOPs.)
- Stop the cluster of TDengine
- Uninstall old version and install new version
- Start the cluster of TDengine
- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
- Run some simple data insertion statements to make sure the cluster works well
- Restore business services

:::warning

TDengine doesn't guarantee any lower version is compatible with the data generated by a higher version, so it's never recommended to downgrade the version.

:::

@@ -18,12 +18,12 @@ If the TDengine server is already installed, it can be verified as follows:

The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment.

-The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
+The following example lists all databases on the host h1.tdengine.com. To use it in your environment, replace `h1.tdengine.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.

```bash
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
  -d "select name, ntables, status from information_schema.ins_databases;" \
-  h1.taosdata.com:6041/rest/sql
+  h1.tdengine.com:6041/rest/sql
```

The following return value indicates that the verification passed.

@@ -133,8 +133,6 @@ The configuration parameters in the URL are as follows:
- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is true. Enabling batch pulling and obtaining a batch of data can improve query performance when the query data volume is large.
-- batchErrorIgnore:true: When executing statement executeBatch, if there is a SQL execution failure in the middle, the following SQL will continue to be executed. false: No more statements after the failed SQL are executed. The default value is: false.
-
-For more information about JDBC native connections, see [Video Tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).

**Connect using the TDengine client-driven configuration file**

When you use a JDBC native connection to connect to a TDengine cluster, you can use the TDengine client driver configuration file to specify parameters such as `firstEp` and `secondEp` of the cluster in the configuration file as below:

@@ -172,7 +172,6 @@ namespace TDengineExample
`Taos` is an ADO.NET connector for TDengine, supporting Linux and Windows platforms. The connector is contributed by community contributor `maikebing`. Please refer to:

* Interface download: <https://github.com/maikebing/Maikebing.EntityFrameworkCore.Taos>
-* Usage notes: <https://www.taosdata.com/blog/2020/11/02/1901.html>

## Frequently Asked Questions

@@ -2,7 +2,7 @@

:::info

-Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding [Windows client](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) for TDengine.
+Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either the standard TDengine server installation package or the [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding Windows client; please refer to [Install TDengine](../../get-started/package).

- libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately.
- taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately.

@@ -116,5 +116,4 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
Mandatory or optional arguments to long options are also mandatory or optional
for any corresponding short options.

-Report bugs to <support@taosdata.com>.
```

@@ -263,7 +263,7 @@ Once the import is complete, the full page view of TDinsight is shown below.

## TDinsight dashboard details

-The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources [dnodes, mnodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster) or databases.
+The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources ([dnodes, mnodes, vnodes](../../taos-sql/node/)) or databases.

Details of the metrics are as follows.
|
|
@ -116,7 +116,7 @@ If you want to start your application in a container, you need to add the corres
|
|||
FROM ubuntu:20.04
|
||||
RUN apt-get update && apt-get install -y wget
|
||||
ENV TDENGINE_VERSION=3.0.0.0
|
||||
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& cd TDengine-client-${TDENGINE_VERSION} \
|
||||
&& ./install_client.sh \
|
||||
|
@ -217,7 +217,7 @@ Here is the full Dockerfile:
|
|||
```docker
|
||||
FROM golang:1.17.6-buster as builder
|
||||
ENV TDENGINE_VERSION=3.0.0.0
|
||||
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& cd TDengine-client-${TDENGINE_VERSION} \
|
||||
&& ./install_client.sh \
|
||||
|
@ -233,7 +233,7 @@ RUN go build
|
|||
FROM ubuntu:20.04
|
||||
RUN apt-get update && apt-get install -y wget
|
||||
ENV TDENGINE_VERSION=3.0.0.0
|
||||
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& cd TDengine-client-${TDENGINE_VERSION} \
|
||||
&& ./install_client.sh \
|
||||
|
|
|
@ -380,6 +380,35 @@ The charset that takes effect is UTF-8.
|
|||
| Unit | bytes |
|
||||
| Value Range | 0: always compress; >0: only compress when the size of any column data exceeds the threshold; -1: always uncompress |
|
||||
| Default Value | -1 |
|
||||
| Note | Available from version 2.3.0.0 |
|
||||
|
||||
## Continuous Query Parameters
|
||||
|
||||
### minSlidingTime
|
||||
|
||||
| Attribute | Description |
|
||||
| ------------- | -------------------------------------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Minimum sliding time of time window |
|
||||
| Unit | millisecond or microsecond, depending on time precision |
|
||||
| Value Range | 10-1000000 |
|
||||
| Default Value | 10 |
|
||||
|
||||
### minIntervalTime
|
||||
|
||||
| Attribute | Description |
|
||||
| ------------- | --------------------------- |
|
||||
| Applicable | Server Only |
|
||||
| Meaning | Minimum size of time window |
|
||||
| Unit | millisecond |
|
||||
| Value Range | 1-1000000 |
|
||||
| Default Value | 10 |
|
||||
|
||||
:::info
|
||||
To prevent system resources from being exhausted by multiple concurrent streams, a random delay is applied to each stream automatically. `maxFirstStreamCompDelay` is the maximum delay before a continuous query is started for the first time. `streamCompDelayRatio` is the ratio for calculating the delay, with the size of the time window as the base. `maxStreamCompDelay` is the maximum delay. The actual delay is a random duration no longer than `maxStreamCompDelay`. If a continuous query fails, `retryStreamCompDelay` is the delay before retrying it, also no longer than `maxStreamCompDelay`.
|
||||
|
||||
:::
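As a hypothetical sketch only, these parameters would be set in `taos.cfg`; the values below are made up and should be tuned for your workload:

```
# maximum delay before a continuous query first starts
maxFirstStreamCompDelay   10000
# ratio used to compute the delay from the time window size
streamCompDelayRatio      0.1
# upper bound for any stream computation delay
maxStreamCompDelay        20000
# delay before retrying a failed continuous query
retryStreamCompDelay      10
```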
|
||||
|
||||
## Log Parameters
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
title: Schemaless Writing
|
||||
description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface."
|
||||
description: 'The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface.'
|
||||
---
|
||||
|
||||
In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing
|
||||
|
@ -25,7 +25,7 @@ where:
|
|||
- measurement will be used as the data table name. It will be separated from tag_set by a comma.
|
||||
- `tag_set` will be used as tags, with format like `<tag_key>=<tag_value>,<tag_key>=<tag_value>`. Enter a space between `tag_set` and `field_set`.
|
||||
- `field_set` will be used as data columns, with format like `<field_key>=<field_value>,<field_key>=<field_value>`. Enter a space between `field_set` and `timestamp`.
|
||||
- `timestamp` is the primary key timestamp corresponding to this row of data.
|
||||
|
||||
All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes (").
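For illustration, a hypothetical line following this protocol might look like this (the names, values, and nanosecond timestamp are made up):

```
meters,location=California.SanFrancisco,groupid=2 current=10.3,voltage=219i32,phase=0.31 1648432611249000000
```

Here `current` and `phase` have no suffix and map to double, while `voltage` uses the `i32` suffix and maps to Int, per the suffix table below.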
|
||||
|
||||
|
@ -36,14 +36,14 @@ In the schemaless writing data line protocol, each data item in the field_set ne
|
|||
- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All of these refer to ASCII characters.)
|
||||
- Numeric types are distinguished by their suffix.
|
||||
|
||||
| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
|
||||
| -------- | -------- | ------------ | -------------- |
|
||||
| 1 | None or f64 | double | 8 |
|
||||
| 2 | f32 | float | 4 |
|
||||
| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
|
||||
| 4 | i16/u16 | SmallInt/USmallInt | 2 |
|
||||
| 5 | i32/u32 | Int/UInt | 4 |
|
||||
| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
|
||||
| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
|
||||
| ----------------- | ----------- | ----------------------------- | ---------------- |
|
||||
| 1 | None or f64 | double | 8 |
|
||||
| 2 | f32 | float | 4 |
|
||||
| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
|
||||
| 4 | i16/u16 | SmallInt/USmallInt | 2 |
|
||||
| 5 | i32/u32 | Int/UInt | 4 |
|
||||
| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
|
||||
|
||||
- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
|
||||
|
||||
|
@ -61,7 +61,7 @@ Note that if the wrong case is used when describing the data type suffix, or if
|
|||
|
||||
Schemaless writes process row data according to the following principles.
|
||||
|
||||
1. You can use the following rules to generate the subtable names: first, combine the measurement name and the keys and values of the tags into the following string:
|
||||
|
||||
```json
|
||||
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
|
||||
|
@ -82,7 +82,7 @@ You can configure smlChildTableName to specify table names, for example, `smlChi
|
|||
|
||||
:::tip
|
||||
All processing logic of schemaless writing still follows TDengine's underlying restrictions on data structures; for example, the total length of each row of data cannot exceed
|
||||
16KB. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
|
||||
16KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
|
||||
|
||||
:::
|
||||
|
||||
|
@ -90,23 +90,23 @@ All processing logic of schemaless will still follow TDengine's underlying restr
|
|||
|
||||
Three specified modes are supported in the schemaless writing process, as follows:
|
||||
|
||||
| **Serial** | **Value** | **Description** |
|
||||
| -------- | ------------------- | ------------------------------- |
|
||||
| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
|
||||
| 2 | SML_TELNET_PROTOCOL | OpenTSDB file protocol |
|
||||
| 3 | SML_JSON_PROTOCOL | OpenTSDB JSON protocol |
|
||||
| **Serial** | **Value** | **Description** |
|
||||
| ---------- | ------------------- | ---------------------- |
|
||||
| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
|
||||
| 2 | SML_TELNET_PROTOCOL | OpenTSDB file protocol |
|
||||
| 3 | SML_JSON_PROTOCOL | OpenTSDB JSON protocol |
|
||||
|
||||
In InfluxDB line protocol mode, you must specify the precision of the input timestamp. Valid precisions are described in the following table.
|
||||
|
||||
| **No.** | **Precision** | **Description** |
|
||||
| -------- | --------------------------------- | -------------- |
|
||||
| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) |
|
||||
| 2 | TSDB_SML_TIMESTAMP_HOURS | Hours |
|
||||
| 3 | TSDB_SML_TIMESTAMP_MINUTES | Minutes |
|
||||
| 4 | TSDB_SML_TIMESTAMP_SECONDS | Seconds |
|
||||
| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | Milliseconds |
|
||||
| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | Microseconds |
|
||||
| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | Nanoseconds |
|
||||
| **No.** | **Precision** | **Description** |
|
||||
| ------- | --------------------------------- | --------------------- |
|
||||
| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) |
|
||||
| 2 | TSDB_SML_TIMESTAMP_HOURS | Hours |
|
||||
| 3 | TSDB_SML_TIMESTAMP_MINUTES | Minutes |
|
||||
| 4 | TSDB_SML_TIMESTAMP_SECONDS | Seconds |
|
||||
| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | Milliseconds |
|
||||
| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | Microseconds |
|
||||
| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | Nanoseconds |
|
||||
|
||||
In OpenTSDB file and JSON protocol modes, the precision of the timestamp is determined from its length in the standard OpenTSDB manner. User input is ignored.
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: taosKeeper
|
||||
title: taosKeeper
|
||||
description: Instructions and tips for using taosKeeper
|
||||
description: exports TDengine monitoring metrics.
|
||||
---
|
||||
|
||||
## Introduction
|
||||
|
@ -22,26 +22,35 @@ You can compile taosKeeper separately and install it. Please refer to the [taosK
|
|||
|
||||
### Configuration and running methods
|
||||
|
||||
<!-- taosKeeper needs to be executed on the terminal of the operating system, it supports two configuration methods: [Command-line arguments](#command-line-arguments-in-detail) and [configuration file](#configuration-file-parameters-in-detail). Command-line arguments take precedence over values in the configuration file. -->
|
||||
taosKeeper needs to be executed on the terminal of the operating system. To run taosKeeper, see [configuration file](#configuration-file-parameters-in-detail).
|
||||
taosKeeper needs to be executed on the terminal of the operating system. It supports three configuration methods: [command-line arguments](#command-line-arguments-in-detail), [environment variables](#environment-variable-in-detail), and a [configuration file](#configuration-file-parameters-in-detail). Command-line arguments take precedence over environment variables, which take precedence over the configuration file.
|
||||
|
||||
**Make sure that the TDengine cluster is running correctly before running taosKeeper.** Ensure that the monitoring service in TDengine has been started. For more information, see [TDengine Monitoring Configuration](../config/#monitoring).
|
||||
|
||||
<!--
|
||||
### Command-Line Parameters
|
||||
|
||||
You can use command-line parameters to run taosBenchmark and control its behavior:
|
||||
You can use command-line parameters to run taosKeeper and control its behavior:
|
||||
|
||||
```shell
|
||||
taosKeeper
|
||||
$ taosKeeper
|
||||
```
|
||||
-->
|
||||
### Environment variable
|
||||
|
||||
You can use environment variables to run taosKeeper and control its behavior:
|
||||
|
||||
```shell
|
||||
$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
|
||||
|
||||
$ taoskeeper
|
||||
```
|
||||
|
||||
You can run `taoskeeper -h` for more details.
|
||||
|
||||
### Configuration File
|
||||
|
||||
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used.
|
||||
|
||||
```shell
|
||||
taoskeeper -c <keeper config file>
|
||||
$ taoskeeper -c <keeper config file>
|
||||
```
|
||||
|
||||
**Sample configuration files**
|
||||
|
@ -110,7 +119,7 @@ Query OK, 1 rows in database (0.036162s)
|
|||
#### Export Monitoring Metrics
|
||||
|
||||
```shell
|
||||
curl http://127.0.0.1:6043/metrics
|
||||
$ curl http://127.0.0.1:6043/metrics
|
||||
```
|
||||
|
||||
Sample result set (excerpt):
|
||||
|
|
|
@ -9,7 +9,7 @@ MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emq
|
|||
|
||||
The following preparations are required for EMQX to add TDengine data sources correctly.
|
||||
- The TDengine cluster is deployed and working properly
|
||||
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
|
||||
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
|
||||
- If you use the emulated writers described later, you need to install the appropriate version of Node.js. V12 is recommended.
|
||||
|
||||
## Install and start EMQX
|
||||
|
@ -28,8 +28,6 @@ USE test;
|
|||
CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP);
|
||||
```
|
||||
|
||||
Note: The table schema is based on the blog [(In Chinese) Data Transfer, Storage, Presentation, EMQX + TDengine Build MQTT IoT Data Visualization Platform](https://www.taosdata.com/blog/2020/08/04/1722.html). Subsequent operations in this section also use the scenario from that blog. Please modify everything according to your actual application scenario.
|
||||
|
||||
## Configuring EMQX Rules
|
||||
|
||||
Since the configuration interface of EMQX differs from version to version, we use v4.4.5 as an example here. For other versions, please refer to the corresponding official documentation.
|
||||
|
@ -137,5 +135,5 @@ Use the TDengine CLI program to log in and query the appropriate databases and t
|
|||
|
||||

|
||||
|
||||
Please refer to the [TDengine official documentation](https://docs.taosdata.com/) for more details on how to use TDengine.
|
||||
Please refer to the [TDengine official documentation](https://docs.tdengine.com/) for more details on how to use TDengine.
|
||||
For details on how to use EMQX, please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html).
|
||||
|
|
|
@ -0,0 +1,36 @@
|
|||
---
|
||||
sidebar_label: Google Data Studio
|
||||
title: Use Google Data Studio to access TDengine
|
||||
---
|
||||
|
||||
Data Studio is a powerful tool for reporting and visualization, offering a wide variety of charts and connectors and making it easy to generate reports based on predefined templates. Its ease of use and robust ecosystem have made it one of the first choices for people working in data analysis.
|
||||
|
||||
TDengine is a high-performance, scalable time-series database that supports SQL. Many businesses and developers in fields spanning from IoT and Industry Internet to IT and finance are using TDengine as their time-series database management solution.
|
||||
|
||||
The TDengine team immediately saw the benefit of pairing TDengine's time-series data processing with Data Studio's analytics, and got to work on a connector for Data Studio.
|
||||
|
||||
With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”.
|
||||
|
||||

|
||||
|
||||
Select the TDengine connector and click Authorize.
|
||||
|
||||

|
||||
|
||||
Then sign in to your Google Account and click Allow to enable the connection to TDengine.
|
||||
|
||||

|
||||
|
||||
In the Enter URL field, type the hostname and port of the server running the TDengine REST service. In the following fields, type your username, password, database name, table name, and the start and end times of your query range. Then, click Connect.
|
||||
|
||||

|
||||
|
||||
After the connection is established, you can use Data Studio to process your data and create reports.
|
||||
|
||||

|
||||
|
||||
In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data – some examples are shown below.
|
||||
|
||||

|
||||
|
||||
With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we’re sure you’ll be able to gain new insights and obtain even more value from your data.
|
After Width: | Height: | Size: 17 KiB |
After Width: | Height: | Size: 17 KiB |
After Width: | Height: | Size: 14 KiB |
After Width: | Height: | Size: 16 KiB |
After Width: | Height: | Size: 4.6 KiB |
After Width: | Height: | Size: 5.9 KiB |
After Width: | Height: | Size: 14 KiB |
After Width: | Height: | Size: 14 KiB |
After Width: | Height: | Size: 14 KiB |
After Width: | Height: | Size: 14 KiB |
After Width: | Height: | Size: 12 KiB |
|
@ -12,6 +12,7 @@ The design of TDengine is based on the assumption that any hardware or software
|
|||
The logical structure diagram of TDengine's distributed architecture is as follows:
|
||||
|
||||

|
||||
|
||||
<center> Figure 1: TDengine architecture diagram </center>
|
||||
|
||||
A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine client driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit.
|
||||
|
@ -38,15 +39,16 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
|
|||
|
||||
**Cluster external connection**: A TDengine cluster can accommodate a single data node, multiple data nodes, or even thousands of them. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for the connection is the End Point (FQDN plus configured port number) of a data node. When the application `taos` is started through the CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number through `-p`. If the port is not configured, the TDengine system configuration parameter `serverPort` is adopted.
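For example, following the options described above, a connection to a hypothetical data node could be initiated as:

```shell
taos -h h1.tdengine.com -p 6030
```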
|
||||
|
||||
**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode:
|
||||
|
||||
1. Check whether the mnodeEpList file exists; if it does not exist or cannot be opened to obtain the mnode EP information, skip to the second step;
|
||||
2. Check the system configuration file taos.cfg to obtain the node configuration parameters `firstEp` and `secondEp` (the nodes specified by these two parameters can be normal nodes without an mnode; in this case, the node will try to redirect to the mnode when connected). If taos.cfg does not exist, or these two configuration parameters do not exist in it or are invalid, skip to the third step;
|
||||
3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again.
|
||||
|
||||
**The choice of mnode**: TDengine logically has a management node, but there is no separate execution code; the server side has only one set of execution code, taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it checks its own End Point against the obtained mnode EP List. If its EP is in the list, the data node starts the mnode module and becomes an mnode; otherwise the mnode module is not started. During system operation, due to load balancing, downtime, and other reasons, the mnode may migrate to a new dnode, completely transparently and without manual intervention. The modification of configuration parameters is a decision made by the mnode itself according to resource usage.
|
||||
|
||||
**Add new data nodes:** Once a system has a data node, it is already a working system. Two steps are needed to add a new node into the cluster.
|
||||
|
||||
- Step 1: Connect to an existing working data node using the TDengine CLI, then add the End Point of the new data node with the command "create dnode".
|
||||
- Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
|
||||
|
||||
|
@ -57,6 +59,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
|
|||
To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process.
|
||||
|
||||

|
||||
|
||||
<center> Figure 2: Typical process of TDengine </center>
|
||||
|
||||
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
|
||||
|
@ -121,16 +124,17 @@ The load balancing process does not require any manual intervention, and it is t
|
|||
|
||||
If a database has N replicas, a virtual node group has N virtual nodes, but only one is the leader and all others are followers. When the application writes a new record to the system, only the leader vnode can accept the write request. If a follower vnode receives a write request, the system will notify TAOSC to redirect.
|
||||
|
||||
### Leader vnode Writing Process
|
||||
|
||||
The leader vnode uses the following writing process:
|
||||
|
||||

|
||||
|
||||
<center> Figure 3: TDengine Leader writing process </center>
|
||||
|
||||
1. The leader vnode receives the application's data insertion request, verifies it, and moves to the next step;
|
||||
2. If the system configuration parameter `walLevel` is greater than 0, the vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine writes WAL data to disk immediately, ensuring that even if the system goes down, all data can be recovered from the database log file;
|
||||
3. If there are multiple replicas, the vnode will forward the data packet to the follower vnodes in the same virtual node group, and the forwarded packet carries a version number with the data;
|
||||
4. Write into memory and add the record to the “skip list”;
|
||||
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
|
||||
6. If any of steps 2, 3, or 4 fails, the error is returned directly to the application.
|
||||
|
@ -140,6 +144,7 @@ Leader Vnode uses a writing process as follows:
|
|||
For a follower vnode, the write process is as follows:
|
||||
|
||||

|
||||
|
||||
<center> Figure 4: TDengine Follower Writing Process </center>
|
||||
|
||||
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
|
||||
|
@ -212,6 +217,7 @@ When data is written to disk, the system decides whether to compress the data bas
|
|||
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
|
||||
|
||||
dataDir format is as follows:
|
||||
|
||||
```
|
||||
dataDir data_path [tier_level]
|
||||
```
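For instance, a hypothetical three-tier layout (the mount points are placeholders) could be configured as:

```
dataDir /mnt/ssd   0
dataDir /mnt/disk  1
dataDir /mnt/nas   2
```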
|
||||
|
@ -270,6 +276,7 @@ For the data collected by device D1001, the number of records per hour is counte
|
|||
TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable (super table). An STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tags. There can be multiple tags, which can be added, deleted, and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under an STable by specifying tag filters. This greatly simplifies the development of applications. The process is shown in the following figure:
|
||||
|
||||

|
||||
|
||||
<center> Figure 5: Diagram of multi-table aggregation query </center>
|
||||
|
||||
1. Application sends a query condition to system;
|
||||
|
@ -279,9 +286,8 @@ TDengine creates a separate table for each data collection point, but in practic
|
|||
5. Each vnode first finds, from memory, the set of tables within its own node that meet the tag filters, then scans the stored time-series data, completes the corresponding aggregation calculations, and returns the result to TAOSC;
|
||||
6. TAOSC finally aggregates the results returned by multiple data nodes and sends them back to the application.
|
||||
|
||||
Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details.
|
||||
Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TDengine SQL for details.
|
||||
|
||||
### Precomputation
|
||||
|
||||
In order to effectively improve query performance, and based on the fact that IoT data is rarely changed once written, statistical information about the data stored in a data block is recorded in the head of the data block, including the max value, min value, and sum. We call this a precomputing unit. If the query processing involves all the data of a whole data block, the precomputed results are used directly, and there is no need to read the data block contents at all. Since the amount of precomputed data is much smaller than the actual size of the data blocks stored on disk, for query processing with disk IO as the bottleneck, using precomputed results can greatly reduce the read IO pressure and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) of PostgreSQL.
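As a sketch, a whole-block aggregation such as the following (table and column names are illustrative) can be answered largely from the precomputing units when its time range covers entire data blocks:

```sql
SELECT MIN(current), MAX(current), SUM(current)
FROM meters
WHERE ts >= '2022-01-01 00:00:00' AND ts < '2022-02-01 00:00:00';
```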
|
||||
|
||||
|
|
|
@ -41,7 +41,7 @@ The agents deployed in the application nodes are responsible for providing opera
|
|||
|
||||
- **TDengine installation and deployment**
|
||||
|
||||
First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to the blog ["Installation and Uninstallation of TDengine Multiple Installation Packages"](https://www.taosdata.com/blog/2019/08/09/566.html).
|
||||
First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to [Install TDengine](../../get-started/package)
|
||||
|
||||
Note that once the installation is complete, do not start the `taosd` service before properly configuring the parameters.
|
||||
|
||||
|
@ -51,7 +51,7 @@ TDengine version 2.4 and later version includes `taosAdapter`. taosAdapter is a
|
|||
|
||||
Users can flexibly deploy taosAdapter instances, based on their requirements, to improve data writing throughput and provide guarantees for data writes in different application scenarios.
|
||||
|
||||
Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/).
|
||||
Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](../../reference/taosadapter/).
|
||||
|
||||
If using collectd, modify the configuration file in its default location `/etc/collectd/collectd.conf` to point to the IP address and port of the node where to deploy taosAdapter. For example, assuming the taosAdapter IP address is 192.168.1.130 and port 6046, configure it as follows.
|
||||
|
||||
|
@ -411,7 +411,7 @@ TDengine provides a wealth of help documents to explain many aspects of cluster
|
|||
|
||||
### Cluster Deployment
|
||||
|
||||
The first is TDengine installation. Download the latest stable version of TDengine from the official website, and install it. Please refer to the blog ["Installation and Uninstallation of Various Installation Packages of TDengine"](https://www.taosdata.com/blog/2019/08/09/566.html) for the various installation package formats.
|
||||
The first is TDengine installation. Download the latest stable version of TDengine from the official website, and install it. Please refer to [Install TDengine](../../get-started/package) for more details.
|
||||
|
||||
Note that once the installation is complete, do not immediately start the `taosd` service, but start it after correctly configuring the parameters.
|
||||
|
||||
|
|
|
@ -4,22 +4,22 @@ sidebar_label: 文档首页
|
|||
slug: /
|
||||
---
|
||||
|
||||
TDengine is an [open-source](https://www.taosdata.com/tdengine/open_source_time-series_database), [high-performance](https://www.taosdata.com/fast), [cloud-native](https://www.taosdata.com/tdengine/cloud_native_time-series_database) <a href="https://www.taosdata.com/" data-internallinksmanager029f6b8e52c="2" title="时序数据库" target="_blank" rel="noopener">time-series database</a> (<a href="https://www.taosdata.com/time-series-database" data-internallinksmanager029f6b8e52c="9" title="Time Series DataBase" target="_blank" rel="noopener">Time Series Database</a>, <a href="https://www.taosdata.com/tsdb" data-internallinksmanager029f6b8e52c="8" title="TSDB" target="_blank" rel="noopener">TSDB</a>), optimized for IoT, the Industrial Internet, finance, and other scenarios. It also comes with built-in caching, stream processing, data subscription, and other system features, which greatly reduce system design complexity and lower R&D and operating costs, making it a simplified platform for time-series data processing. This document is the TDengine user manual. It mainly introduces TDengine's basic concepts, installation, usage, features, development interfaces, operation and maintenance, and kernel design, and is aimed primarily at architects, developers, and system administrators.
|
||||
TDengine is an [open-source](https://www.taosdata.com/tdengine/open_source_time-series_database), [high-performance](https://www.taosdata.com/fast), [cloud-native](https://www.taosdata.com/tdengine/cloud_native_time-series_database) <a href="https://www.taosdata.com/" data-internallinksmanager029f6b8e52c="2" title="时序数据库" target="_blank" rel="noopener">time-series database</a> (<a href="https://www.taosdata.com/time-series-database" data-internallinksmanager029f6b8e52c="9" title="Time Series DataBase" target="_blank" rel="noopener">Time Series Database</a>, <a href="https://www.taosdata.com/tsdb" data-internallinksmanager029f6b8e52c="8" title="TSDB" target="_blank" rel="noopener">TSDB</a>), optimized for IoT, Connected Vehicles, the Industrial Internet, finance, IT operations, and other scenarios. It also comes with built-in caching, stream processing, data subscription, and other system features, which greatly reduce system design complexity and lower R&D and operating costs, making it a simplified platform for time-series data processing. This document is the TDengine user manual. It mainly introduces TDengine's basic concepts, installation, usage, features, development interfaces, operation and maintenance, and kernel design, and is aimed primarily at architects, development engineers, and system administrators.
|
||||
|
||||
TDengine makes full use of the characteristics of time-series data, proposing the concepts of "one table per data collection point" and "supertable", and designing an innovative storage engine, so that the efficiency of data writing, querying, and storage is all greatly improved. To correctly understand and use TDengine, please make sure to read the [Concepts](./concept) chapter carefully.

If you are a developer, please read the [Developer Guide](./develop) chapter carefully. It covers database connections, data modeling, data ingestion, queries, stream processing, caching, data subscription, user-defined functions, and more in detail, with sample code in various programming languages. In most cases, you can simply copy and paste the sample code, make a few changes to fit your application, and it will run.

We already live in the era of big data. Vertical scaling can no longer meet ever-growing business needs; any system must be able to scale horizontally, which makes clustering an indispensable feature of big data and database systems. The TDengine team has not only implemented cluster capability but also open-sourced this important core feature. For how to deploy, manage, and maintain a TDengine cluster, please refer to the [Deployment](./deployment) chapter.

TDengine uses SQL as its query language, which greatly reduces learning and migration costs, while also extending SQL for time-series scenarios to support operations such as interpolation, downsampling, and time-weighted averages. The [SQL Reference](./taos-sql) chapter describes the SQL syntax in detail and lists all supported commands and functions.

If you are a system administrator who cares about installation, upgrades, fault tolerance and disaster recovery, data import and export, configuration parameters, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer carefully to the [Administration](./operation) chapter.

If you want to learn more about TDengine's peripheral tools, the REST API, and the connectors for various programming languages, please see the [Reference](./reference) chapter.

If you are interested in TDengine's internal architecture design, please read the [Inside TDengine](./tdinternal) chapter carefully. It covers the cluster design and the workflows of data partitioning, sharding, writing, reading, querying, and aggregate querying in detail. If you want to study the TDengine code or even contribute code, be sure to read this chapter thoroughly.

Finally, as an open-source product, everyone is welcome to participate. If you find any errors in the documentation or anything described unclearly, please click "Edit this document" at the bottom of each page to modify it directly.
|
||||
|
||||
Together, we make a difference!
|
||||
|
|
|
@ -4,72 +4,95 @@ description: 简要介绍 TDengine 的主要功能
|
|||
toc_max_heading_level: 2
|
||||
---
|
||||
|
||||
TDengine is an [open-source](https://www.taosdata.com/tdengine/open_source_time-series_database), [high-performance](https://www.taosdata.com/tdengine/fast), [cloud-native](https://www.taosdata.com/tdengine/cloud_native_time-series_database) <a href="https://www.taosdata.com/" data-internallinksmanager029f6b8e52c="2" title="时序数据库" target="_blank" rel="noopener">time-series database</a> (<a href="https://www.taosdata.com/time-series-database" data-internallinksmanager029f6b8e52c="9" title="Time Series DataBase" target="_blank" rel="noopener">Time Series Database</a>, <a href="https://www.taosdata.com/tsdb" data-internallinksmanager029f6b8e52c="8" title="TSDB" target="_blank" rel="noopener">TSDB</a>). TDengine can be widely used in IoT, the Industrial Internet, Connected Vehicles, IT operations, finance, and other fields. In addition to its core time-series database functions, TDengine also provides [caching](../develop/cache/), [data subscription](../develop/tmq), [stream processing](../develop/stream), and other features, making it a simplified time-series data processing platform that minimizes system design complexity and reduces R&D and operating costs.
|
||||
TDengine is an open-source, high-performance, cloud-native [time-series database](https://tdengine.com/tsdb/) optimized for IoT, Connected Vehicles, the Industrial Internet, finance, IT operations, and other scenarios. TDengine's code, including its cluster functionality, is open source under the GNU AGPL v3.0 license. In addition to the core time-series database functions, TDengine also provides [caching](../develop/cache/), [data subscription](../develop/tmq), [stream processing](../develop/stream), and other features to reduce system complexity as well as R&D and operations costs.
|
||||
|
||||
This chapter introduces TDengine's major features, competitive advantages, typical use cases, benchmark comparisons with other databases, and more, to give you an overall picture of TDengine.

## Major Features

The major features of TDengine are as follows:
|
||||
|
||||
1. High-speed data ingestion. In addition to [SQL writing](../develop/insert-data/sql-writing), it supports [schemaless writing](../reference/schemaless/), including the [InfluxDB LINE protocol](../develop/insert-data/influxdb-line), [OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON](../develop/insert-data/opentsdb-json), and other protocols;
2. Third-party data collection tools such as [Telegraf](../third-party/telegraf), [Prometheus](../third-party/prometheus), [StatsD](../third-party/statsd), [collectd](../third-party/collectd), [icinga2](../third-party/icinga2), [TCollector](../third-party/tcollector), [EMQ](../third-party/emq-broker), and [HiveMQ](../third-party/hive-mq-broker) can write data into TDengine after simple configuration, without any code;
3. Supports [various queries](../develop/query-data), including aggregation, nested queries, downsampling, interpolation, and more
4. Supports [user-defined functions](../develop/udf)
5. Supports [caching](../develop/cache): the latest record of every table is cached, so Redis is not needed
6. Supports [stream processing](../develop/stream)
7. Supports [data subscription](../develop/tmq), with the ability to specify filter conditions
8. Supports [clustering](../deployment/): processing capacity can be scaled horizontally with more nodes, and high reliability is achieved through multiple replicas
9. Provides a [command-line program](../reference/taos-shell) for managing clusters, checking system status, and running ad hoc queries
10. Provides multiple ways to [import](../operation/import) and [export](../operation/export) data
11. Supports [monitoring the TDengine cluster itself](../operation/monitor)
12. Provides [connectors](../connector) for various languages, such as C/C++, Java, Go, Node.JS, Rust, Python, and C#
13. Supports the [REST interface](../connector/rest-api/)
14. Supports [seamless integration with Grafana](../third-party/grafana)
15. Supports seamless integration with Google Data Studio
16. Supports [deployment on Kubernetes](../deployment/k8s)
|
||||
1. Writing data, with support for
   - [SQL writing](../develop/insert-data/sql-writing)
   - [Schemaless writing](../reference/schemaless/), supporting several standard protocols
     - [InfluxDB Line protocol](../develop/insert-data/influxdb-line)
     - [OpenTSDB Telnet protocol](../develop/insert-data/opentsdb-telnet)
     - [OpenTSDB JSON protocol](../develop/insert-data/opentsdb-json)
   - Seamless integration with many third-party tools, all of which can write data into TDengine with nothing more than configuration
     - [Telegraf](../third-party/telegraf)
     - [Prometheus](../third-party/prometheus)
     - [StatsD](../third-party/statsd)
     - [collectd](../third-party/collectd)
     - [Icinga2](../third-party/icinga2)
     - [TCollector](../third-party/tcollector)
     - [EMQX](../third-party/emq-broker)
     - [HiveMQ](../third-party/hive-mq-broker)
2. Querying data, with support for
   - [Standard SQL](../taos-sql), including nested queries
   - [Time-series-specific functions](../taos-sql/function/#time-series-extensions)
   - [Time-series-specific queries](../taos-sql/distinguished), such as downsampling, interpolation, cumulative sum, time-weighted average, state windows, and session windows
   - [User-defined functions (UDF)](../taos-sql/udf)
3. [Caching](../develop/cache): the latest record of every table is cached, so time-series data can be processed efficiently without Redis
4. [Stream processing](../develop/stream): TDengine supports not only continuous queries but also event-driven stream processing, so no stream processing component such as Flink or Spark is needed for time-series data
5. [Data subscription](../develop/tmq): applications can subscribe to the data of a table or a group of tables, with the same API as Kafka and the ability to specify filter conditions
6. Visualization
   - Seamless integration with [Grafana](../third-party/grafana/)
   - Seamless integration with Google Data Studio
7. Clustering
   - [Cluster deployment](../deployment/): processing capacity can be increased by adding nodes for horizontal scaling
   - [TDengine can be deployed on Kubernetes](../deployment/k8s/)
   - High availability through multiple replicas
8. Administration
   - [Monitoring](../operation/monitor) of running TDengine instances
   - Multiple [data import](../operation/import) methods
   - Multiple [data export](../operation/export) methods
9. Tools
   - An [interactive command-line interface (CLI)](../reference/taos-shell) for managing clusters, checking system status, and running ad hoc queries
   - The stress-testing tool [taosBenchmark](../reference/taosbenchmark) for testing TDengine's performance
10. Programming
    - [Connectors](../connector) for various languages, such as [C/C++](../connector/cpp), [Java](../connector/java), [Go](../connector/go), [Node.js](../connector/node), [Rust](../connector/rust), [Python](../connector/python), and [C#](../connector/csharp)
    - The [REST interface](../connector/rest-api/)
|
||||
|
||||
For more details on features, please read through the entire documentation.

## Competitive Advantages

Because TDengine makes full use of [the characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/105.html), such as being structured, requiring no transactions, rarely being deleted or updated, and being write-heavy and read-light, and has designed a brand-new storage engine and computing engine specifically for time-series data, TDengine differs from other time-series databases in the following ways:

Because TDengine makes full use of [the characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/105.html), such as being structured, requiring no transactions, rarely being deleted or updated, and being write-heavy and read-light, TDengine differs from other time-series databases in the following ways:
|
||||
|
||||
- **[High Performance](https://www.taosdata.com/tdengine/fast)**: With its innovative storage engine design, TDengine is more than 10 times faster than general-purpose databases for both data writing and querying, far outperforms other time-series databases, and uses less than 1/10 of the storage space of general-purpose databases.

- **[High Performance](https://www.taosdata.com/tdengine/fast)**: TDengine is the only time-series database that solves the high-cardinality problem of time-series data storage. It supports hundreds of millions of data collection points and far outperforms other time-series databases in data ingestion, querying, and compression.

- **[Cloud Native](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**: With a natively distributed design that takes full advantage of cloud platforms, TDengine provides horizontal scalability with elasticity, resilience, and observability, supports Kubernetes deployment, and can run on public, private, and hybrid clouds.

- **[Simplified Time-Series Data Platform](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**: With built-in caching, stream processing, and data subscription, TDengine provides a simplified solution for time-series data processing, greatly reducing the design complexity and operating costs of business systems.

- **[Simplified Time-Series Data Platform](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**: With built-in message queuing, caching, and stream processing, applications no longer need to integrate software such as Kafka, Redis, HBase, or Spark, which greatly reduces system complexity and the costs of application development and operations.

- **[Cloud Native](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**: With its native distributed design, data sharding and partitioning, separation of compute and storage, the RAFT protocol, Kubernetes deployment, and full observability, TDengine is a cloud-native time-series database that can be deployed on public, private, and hybrid clouds.

- **[Analytics](https://www.taosdata.com/tdengine/easy_data_analytics)**: TDengine supports SQL and extends it for time-series-specific analysis. With supertables, separation of compute and storage, partitioning and sharding, precomputation, user-defined functions, and other technologies, TDengine has powerful analytical capabilities.

- **[Ease of Use](https://www.taosdata.com/tdengine/ease_of_use)**: For system administrators, TDengine greatly reduces the cost of management and maintenance. For developers, it provides simple interfaces, a simplified solution, and seamless integration with third-party tools. For data analysts, it provides convenient data access.

- **[Ease of Use](https://www.taosdata.com/tdengine/ease_of_use)**: No dependencies; installation and clustering take only seconds. TDengine provides a REST interface and connectors for various languages, integrates seamlessly with many third-party tools, and offers a command-line program for management and ad hoc queries as well as various maintenance tools.

- **[Easy Data Analytics](https://www.taosdata.com/tdengine/easy_data_analytics)**: With supertables, separation of compute and storage, partitioning and sharding, precomputation, and other technologies, TDengine can browse, format, and access data efficiently.

- **[Open Source Core](https://www.taosdata.com/tdengine/open_source_time-series_database)**: TDengine's core code, including its cluster functionality, is fully open source. As of August 1, 2022, there were more than 135.9k running instances worldwide, with 18.7k GitHub stars, 4.4k forks, and an active community.

- **[Open Source Core](https://www.taosdata.com/tdengine/open_source_time-series_database)**: TDengine's core code, including its cluster functionality, is fully available under an open-source license, with more than 140k running instances worldwide, 19k GitHub stars, and an active developer community.
|
||||
|
||||
Adopting TDengine can significantly reduce the total cost of ownership of typical IoT, Connected Vehicles, and Industrial Internet big data platforms, in several ways:

1. With its superior performance, it dramatically reduces the computing and storage resources the system requires
2. Because it supports SQL and integrates seamlessly with many third-party software packages, learning and migration costs drop significantly
3. Because it is a simplified time-series data platform, system complexity and R&D and operating costs are greatly reduced
4. Because maintenance is simple, operation and maintenance costs can be greatly reduced
|
||||
|
||||
## Technical Ecosystem

The following figure shows the role TDengine plays in a typical time-series big data platform:
|
||||
|
||||
<figure>
|
||||
|
||||

|
||||
|
||||
<center><figcaption>Figure 1. TDengine Technical Ecosystem</figcaption></center>
</figure>
<center>Figure 1. TDengine Technical Ecosystem</center>

On the left side of the figure are various data collection agents and message queues, including OPC-UA, MQTT, Telegraf, and Kafka; their data is continuously written into TDengine. On the right side are visualization and BI tools, SCADA software, and applications. At the bottom are TDengine's own command-line interface (CLI) and visual management tools.
|
||||
|
||||
## Overall Use Cases

## Typical Use Cases

As a high-performance, distributed, SQL-supporting time-series database, TDengine's typical use cases include but are not limited to IoT, the Industrial Internet, Connected Vehicles, IT operations, energy, and financial securities. Note that TDengine is a purpose-built database and big data processing tool designed for time-series scenarios; because it takes full advantage of the characteristics of time-series big data, it cannot be used for general-purpose data such as web crawlers, Weibo, WeChat, e-commerce, ERP, or CRM. The following analyzes the applicable scenarios in more detail.
|
||||
|
||||
### Characteristics and Requirements of Data Sources
|
||||
|
||||
|
@ -91,18 +114,18 @@ TDengine的主要功能如下:
|
|||
|
||||
### System Feature Requirements

| System Feature Requirements | Not Applicable | Might Be Applicable | Very Applicable | Description |
| --------------------------- | -------------- | ------------------- | --------------- | ----------- |
| Requires a complete set of built-in data processing algorithms | | √ | | TDengine implements general data processing algorithms, but cannot yet properly handle the special requirements of every industry, so special types of processing still need to be handled at the application level. |
| Requires a large number of cross-table queries | | √ | | This type of processing is better handled by a relational database, or TDengine and a relational database can be combined to implement the system. |
|
||||
|
||||
### System Performance Requirements

| System Performance Requirements | Not Applicable | Might Be Applicable | Very Applicable | Description |
| ------------------------------- | -------------- | ------------------- | --------------- | ----------- |
| Requires very high overall processing capacity | | | √ | TDengine's cluster functionality can easily raise processing capacity by coordinating multiple servers. |
| Requires high-speed data processing | | | √ | TDengine's storage and data processing are designed and optimized for IoT and generally deliver processing speeds many times higher than comparable products. |
| Requires fast processing of fine-grained data | | | √ | Here TDengine's performance fully matches that of relational and NoSQL data processing systems. |
|
||||
|
||||
### System Maintenance Requirements
|
||||
|
||||
|
|
|
@ -41,7 +41,7 @@ USE power;
|
|||
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
|
||||
```
|
||||
|
||||
Just like creating an ordinary table, when creating a supertable you must provide the table name (meters in the example) and the table schema, i.e., the definitions of the data columns. The first column must be a timestamp (ts in the example), and the other columns are the collected metrics (current, voltage, and phase in the example); their data types can be integer, floating point, string, and so on. In addition, you must provide the schema of the tags (location and groupId in the example); their data types can also be integer, floating point, string, and so on. The static attributes of a data collection point can often be used as tags, such as its geographic location, device model, device group ID, and administrator ID. Tags in the schema can be added, deleted, or modified afterwards. For specific definitions and details, see the [Supertable Management in TAOS SQL](/taos-sql/stable) section.

Just like creating an ordinary table, when creating a supertable you must provide the table name (meters in the example) and the table schema, i.e., the definitions of the data columns. The first column must be a timestamp (ts in the example), and the other columns are the collected metrics (current, voltage, and phase in the example); their data types can be integer, floating point, string, and so on. In addition, you must provide the schema of the tags (location and groupId in the example); their data types can also be integer, floating point, string, and so on. The static attributes of a data collection point can often be used as tags, such as its geographic location, device model, device group ID, and administrator ID. Tags in the schema can be added, deleted, or modified afterwards. For specific definitions and details, see the [Supertable Management in TDengine SQL](/taos-sql/stable) section.

A supertable must be created for every type of data collection point, so an IoT system often has multiple supertables. For a power grid, we would need to create supertables for smart meters, transformers, busbars, switches, and so on. In IoT, one device may have multiple data collection points (for example, on a wind turbine, some collection points gather electrical parameters such as current and voltage, while others gather environmental parameters such as temperature, humidity, and wind direction); in that case, multiple supertables must be created for this type of device.
|
||||
|
||||
|
@ -55,7 +55,7 @@ TDengine 对每个数据采集点需要独立建表。与标准的关系型数
|
|||
CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
|
||||
```
|
||||
|
||||
Here d1001 is the table name and meters is the supertable name, followed by the specific value "California.SanFrancisco" for the tag location and the specific value 2 for the tag groupId. Although tag values must be specified when the table is created, they can be modified afterwards. For details, see the [Table Management in TAOS SQL](/taos-sql/table) section.

Here d1001 is the table name and meters is the supertable name, followed by the specific value "California.SanFrancisco" for the tag location and the specific value 2 for the tag groupId. Although tag values must be specified when the table is created, they can be modified afterwards. For details, see the [Table Management in TDengine SQL](/taos-sql/table) section.
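For example, a minimal sketch of modifying a tag value after table creation, reusing the d1001 table above:

```sql
ALTER TABLE d1001 SET TAG groupId = 3;
```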
|
||||
|
||||
TDengine recommends using the globally unique ID of the data collection point as the table name (for example, the device serial number). If there is no unique ID in some scenario, multiple IDs can be combined into one unique ID. It is not recommended to use a unique ID as a tag value.
|
||||
|
||||
|
|
|
@ -26,6 +26,7 @@ import PhpStmt from "./_php_stmt.mdx";
|
|||
Applications insert data by executing INSERT statements through a connector; users can also manually enter INSERT statements through the TDengine CLI to insert data.

### Writing One Record at a Time

The following INSERT statement writes one record into table d1001:
|
||||
|
||||
```sql
|
||||
|
@ -48,7 +49,7 @@ TDengine 也支持一次向多个表写入数据,比如下面这条命令就
|
|||
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
|
||||
```
|
||||
|
||||
For the detailed SQL INSERT syntax, see [Data Writing in TAOS SQL](/taos-sql/insert).

For the detailed SQL INSERT syntax, see [Data Writing in TDengine SQL](/taos-sql/insert).
|
||||
|
||||
:::info
|
||||
|
||||
|
@ -134,4 +135,3 @@ TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 类似,这
|
|||
<PhpStmt />
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@ import TabItem from "@theme/TabItem";
|
|||
|
||||
From the perspective of the client program, writing data efficiently depends on the following factors:

1. The amount of data per write. Generally, the larger each batch of data, the more efficient the write (though the advantage disappears beyond a certain threshold). When writing to TDengine with SQL, try to concatenate as much data as possible into one SQL statement. Currently, the maximum length of a single SQL statement supported by TDengine is 1,048,576 (1M) characters. It can be modified through the client configuration parameter maxSQLLength (default 65480).
1. The amount of data per write. Generally, the larger each batch of data, the more efficient the write (though the advantage disappears beyond a certain threshold). When writing to TDengine with SQL, try to concatenate as much data as possible into one SQL statement. Currently, the maximum length of a single SQL statement supported by TDengine is 1,048,576 (1M) characters.
2. The number of concurrent connections. Generally, the more concurrent connections writing data at the same time, the more efficient the write (though efficiency declines beyond a certain threshold, depending on the server-side processing capacity).
3. The distribution of data across different tables (or subtables), i.e., the adjacency of the data being written. Generally, writing each batch to the same table (or subtable) is more efficient than writing to multiple tables (or subtables);
4. The write method. In general:
|
||||
|
@ -38,13 +38,9 @@ import TabItem from "@theme/TabItem";
|
|||
|
||||
### From the Perspective of Server Configuration {#setting-view}

From the perspective of server configuration, there are also many ways to optimize write performance.

From the perspective of server configuration, the number of vgroups should be set appropriately when creating a database, based on the number of disks in the system, their I/O capability, and the processor capacity, in order to bring out the full performance of the system. With too few vgroups, the system's performance cannot be fully unleashed; with too many, unnecessary resource contention arises. A common recommendation is twice the number of CPU cores, but tuning should still take the specific system resources into account.
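As an illustrative sketch only (the database name and value are hypothetical), the number of vgroups can be specified when the database is created:

```sql
CREATE DATABASE db1 VGROUPS 16;
```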
|
||||
|
||||
If the total number of tables is small (far fewer than the number of cores times 1000) and the CPU usage of the taosd process stays low no matter how the client program is tuned, the tables are probably unevenly distributed across vgroups. For example: if a database has 1000 tables in total and minTablesPerVnode is also set to 1000, all the tables end up in one vgroup. In that case, setting both minTablesPerVnode and tableIncStepPerVnode to 100 spreads the tables across 10 vgroups (assuming maxVgroupsPerDb is at least 10).

If the total number of tables is large (say, more than 5 million), increasing maxVgroupsPerDb can also significantly speed up table creation. maxVgroupsPerDb defaults to 0, which automatically sets it to the number of CPU cores. If the number of tables is huge, it is also advisable to tune maxTablesPerVnode so as not to exceed the limit on table creation in a single vnode.

For more tuning parameters, see the [Configuration Reference](../../../reference/config).

For more tuning parameters, see [Database Management](../../../taos-sql/database) and [Server Configuration](../../../reference/config).
|
||||
|
||||
## Efficient Writing Examples {#sample-code}
|
||||
|
||||
|
@ -352,7 +348,7 @@ main 函数可以接收 5 个启动参数,依次是:
|
|||
|
||||
<details>
|
||||
|
||||
The SQLWriter class encapsulates the logic of composing SQL statements and writing data. None of the tables are created in advance; instead, when a "table does not exist" error occurs, tables are created in batches with the supertable as the template, and the INSERT statement is then re-executed. For other errors, the SQL being executed at the time is recorded for troubleshooting and recovery. The class also checks whether the SQL is close to the maximum length limit (maxSQLLength); if so, the SQL is executed immediately. To reduce the number of SQL executions, it is advisable to increase maxSQLLength appropriately.

The SQLWriter class encapsulates the logic of composing SQL statements and writing data. None of the tables are created in advance; instead, when a "table does not exist" error occurs, tables are created in batches with the supertable as the template, and the INSERT statement is then re-executed. For other errors, the SQL being executed at the time is recorded for troubleshooting and recovery. The class also checks whether the SQL exceeds the maximum length limit; based on the TDengine 3.0 limit, the supported maximum SQL length of 1,048,576 is passed in through the input parameter maxSQLLength.
|
||||
|
||||
<summary>SQLWriter</summary>
|
||||
|
||||
|
|
|
@ -44,7 +44,7 @@ Query OK, 2 row(s) in set (0.001100s)
|
|||
|
||||
To meet the needs of IoT scenarios, TDengine supports several special functions, such as twa (time-weighted average), spread (difference between the maximum and minimum values), and last_row (the last record); more functions related to IoT scenarios will be added.

For the specific query syntax, see the [Data Querying in TAOS SQL](../../taos-sql/select) section.

For the specific query syntax, see the [Data Querying in TDengine SQL](../../taos-sql/select) section.
|
||||
|
||||
## Multi-Table Aggregation Queries
|
||||
|
||||
|
@ -75,7 +75,7 @@ taos> SELECT count(*), max(current) FROM meters where groupId = 2;
|
|||
Query OK, 1 row(s) in set (0.002136s)
|
||||
```
|
||||
|
||||
The [Data Querying in TAOS SQL](../../taos-sql/select) chapter notes, for each query operation, whether supertables are supported.

The [Data Querying in TDengine SQL](../../taos-sql/select) chapter notes, for each query operation, whether supertables are supported.

## Downsampling and Interpolation
|
||||
|
||||
|
@ -123,7 +123,7 @@ Query OK, 6 rows in database (0.005515s)
|
|||
|
||||
If no data was collected in a time interval, TDengine also provides interpolation.

For the syntax details, see the [Time-Window Aggregation in TAOS SQL](../../taos-sql/distinguished) section.

For the syntax details, see the [Time-Window Aggregation in TDengine SQL](../../taos-sql/distinguished) section.

## Sample Code
|
||||
|
||||
|
|
|
@ -355,19 +355,15 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
|
|||
|
||||
:::info
|
||||
|
||||
- Currently only one level of nesting is supported; that is, you cannot embed another subquery inside a subquery.
- The result of the inner query serves as a "virtual table" for the outer query; this virtual table can be renamed with the AS syntax for easy reference in the outer query.
- Subqueries cannot currently be used in the "continuous query" feature.
- The result of the inner query serves as a "virtual table" for the outer query; it is recommended to give this virtual table an alias for easy reference in the outer query.
- Ordinary table-to-table and supertable-to-supertable JOINs are supported in both the inner and the outer query. The result of the inner query can also take part in JOIN operations with subtables.
- UNION operations are currently supported in neither the inner nor the outer query.
- The features supported in the inner query are the same as those of a non-nested query statement.
- An ORDER BY clause in the inner query is generally meaningless; avoid it to prevent pointless resource consumption.
- Compared with a non-nested query statement, the outer query has the following limitations:
- Computing functions:
- If the inner query's result does not provide a timestamp, functions whose computation depends on timestamps will not work correctly in the outer query, e.g., TOP, BOTTOM, FIRST, LAST, DIFF.
- Functions that require two passes over the data will not work correctly in the outer query, e.g., STDDEV, PERCENTILE.
- The IN operator is not supported in the outer query, but can be used in the inner query.
- The outer query does not support GROUP BY.
- If the inner query's result does not provide a timestamp, functions whose computation implicitly depends on timestamps will not work correctly in the outer query, e.g., INTERP, DERIVATIVE, IRATE, LAST_ROW, FIRST, LAST, TWA, STATEDURATION, TAIL, UNIQUE.
- If the inner query's result is not a valid time series, functions whose computation depends on the data being a time series will not work correctly in the outer query, e.g., LEASTSQUARES, ELAPSED, INTERP, DERIVATIVE, IRATE, TWA, DIFF, STATECOUNT, STATEDURATION, CSUM, MAVG, TAIL, UNIQUE.
- Functions that require two passes over the data will not work correctly in the outer query, e.g., PERCENTILE.
|
||||
|
||||
:::
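As a minimal sketch consistent with the rules above (table and column names are illustrative), the inner query below feeds a one-level outer aggregation through an aliased virtual table:

```sql
SELECT MAX(v) FROM (SELECT AVG(current) AS v FROM meters GROUP BY tbname) t;
```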
|
||||
|
||||
|
|
|
@ -6,11 +6,11 @@ description: TDengine 提供的时序数据特有的查询功能
|
|||
|
||||
TDengine is a big data platform developed specifically for time-series data. Its storage and computing are both tailored to the characteristics of time-series data, and on top of standard SQL it provides a set of distinguished query syntaxes suited to time-series business scenarios, which greatly simplifies application development for time-series use cases.

The distinguished queries provided by TDengine include tag-partitioned queries and windowed queries.

The distinguished queries provided by TDengine include data-partitioned queries and windowed queries.

## Tag-Partitioned Queries

## Data-Partitioned Queries

In supertable queries, the tag partition clause is used when the data needs to be partitioned by tags and a series of computations then performed within each partitioned data space. The tag partition statement is as follows:

The data partition clause is used when the data needs to be partitioned along some dimension and a series of computations then performed within each partitioned data space. The syntax of the data partition statement is as follows:
|
||||
|
||||
```sql
|
||||
PARTITION BY part_list
|
||||
|
@ -18,22 +18,23 @@ PARTITION BY part_list
|
|||
|
||||
part_list can be any scalar expression, including columns, constants, scalar functions, and combinations of them.

When PARTITION BY is used together with tags, TDengine processes the tag partition clause as follows:

TDengine processes the data partition clause as follows:

- The tag partition clause follows the WHERE clause and cannot be used together with a JOIN clause.
- The tag partition clause partitions the supertable data by the specified tag combinations, and the specified computation is performed on each partition. The computation is defined by the clause that follows (a window clause, a GROUP BY clause, or a SELECT clause).
- The tag partition clause can be used together with a window clause (or a GROUP BY clause), in which case the subsequent clause operates on each partition. For example, the following groups the data by the tag location and downsamples each group to 10 minutes, taking the maximum value.
- The data partition clause follows the WHERE clause.
- The data partition clause partitions the table data along the specified dimensions, and the specified computation is performed on each partition. The computation is defined by the clause that follows (a window clause, a GROUP BY clause, or a SELECT clause).
- The data partition clause can be used together with a window clause (or a GROUP BY clause), in which case the subsequent clause operates on each partition. For example, the following groups the data by the tag location and downsamples each group to 10 minutes, taking the maximum value.
|
||||
|
||||
```sql
|
||||
select max(current) from meters partition by location interval(10m)
|
||||
```
|
||||
The most common use of the data partition clause is in supertable queries, partitioning subtable data by tags and then computing on each partition separately. In particular, the PARTITION BY TBNAME usage isolates the data of each subtable into an independent time series, which greatly facilitates statistical analysis in all kinds of time-series scenarios.
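For example, a sketch that isolates each subtable as its own series and counts its rows:

```sql
SELECT tbname, COUNT(*) FROM meters PARTITION BY tbname;
```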
|
||||
|
||||
## Windowed Queries

TDengine supports aggregation queries partitioned by time windows. For example, if a temperature sensor collects data every second but the average temperature over every 10 minutes is needed, a window clause can be used to obtain the desired result. The window clause partitions the queried data set into windows and aggregates within each window. Windows include time windows, state windows, and session windows, and time windows can be further divided into sliding time windows and tumbling time windows. The syntax of windowed queries is as follows:
|
||||
|
||||
```sql
|
||||
SELECT function_list FROM tb_name
|
||||
SELECT select_list FROM tb_name
|
||||
[WHERE where_condition]
|
||||
[SESSION(ts_col, tol_val)]
|
||||
[STATE_WINDOW(col)]
|
||||
|
@ -43,19 +44,15 @@ SELECT function_list FROM tb_name
|
|||
|
||||
The specific restrictions in the above syntax are as follows

### Restrictions on Functions in Windowed Queries

- In aggregation queries, aggregate and selection functions may appear in function_list, and each function must return a single result (e.g., COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST); functions with multi-row output (e.g., DIFF and arithmetic operations) cannot be used.
- In addition, LAST_ROW queries cannot appear together with window aggregation.
- Scalar functions (e.g., CEIL/FLOOR) cannot be used in window aggregation queries either.
|
||||
|
||||
### 窗口子句的规则
|
||||
|
||||
- 窗口子句位于标签切分子句之后,GROUP BY 子句之前,且不可以和 GROUP BY 子句一起使用。
|
||||
- 窗口子句位于数据切分子句之后,GROUP BY 子句之前,且不可以和 GROUP BY 子句一起使用。
|
||||
- 窗口子句将数据按窗口进行切分,对每个窗口进行 SELECT 列表中的表达式的计算,SELECT 列表中的表达式只能包含:
|
||||
- 常量。
|
||||
- 聚集函数。
|
||||
- _wstart伪列、_wend伪列和_wduration伪列。
|
||||
- 聚集函数(包括选择函数和可以由参数确定输出行数的时序特有函数)。
|
||||
- 包含上面表达式的表达式。
|
||||
- 且至少包含一个聚集函数。
|
||||
- 窗口子句不可以和 GROUP BY 子句一起使用。
|
||||
- WHERE 语句可以指定查询的起止时间和其他过滤条件。
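To make the pseudocolumn rule concrete, here is a hedged sketch that returns the window boundaries alongside an aggregate (temp_tb_1 as used in the examples below):

```sql
SELECT _wstart, _wend, COUNT(*) FROM temp_tb_1 INTERVAL(1m);
```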
|
||||
|
||||
|
@ -74,7 +71,7 @@ FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填
|
|||
|
||||
1. The FILL clause may generate a large amount of filled output, so be sure to specify the time range of the query; for each query, the system returns no more than 10 million interpolated rows (see the sketch after these notes).
|
||||
2. In time-dimension aggregation, the time series in the returned results is strictly monotonically increasing.
|
||||
3. If the query target is a super table, the aggregate functions apply to the data of all tables under that super table that satisfy the value filter conditions. If the query does not use a GROUP BY clause, the returned results are strictly monotonically increasing along the time series; if the query groups with GROUP BY, the results within each GROUP are not strictly monotonically increasing along the time series.
|
||||
3. If the query target is a super table, the aggregate functions apply to the data of all tables under that super table that satisfy the value filter conditions. If the query does not use a PARTITION BY clause, the returned results are strictly monotonically increasing along the time series; if the query groups with PARTITION BY, the results within each PARTITION are not strictly monotonically increasing along the time series.
|
||||
|
||||
:::
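A hedged sketch of a bounded, filled downsampling query (the meters table is reused from the partition example above; the time range is an assumed value):

```sql
SELECT AVG(current) FROM meters
WHERE ts >= '2022-01-01 00:00:00' AND ts < '2022-01-02 00:00:00'
INTERVAL(10m) FILL(PREV);
```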
|
||||
|
||||
|
@ -106,7 +103,7 @@ SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
|
|||
|
||||
### State Window
|
||||
|
||||
An integer (or Boolean) or string value identifies the state of the device at the time a record was produced. Records with the same state value belong to the same state window, and the window closes when the value changes. As shown in the figure below, the state windows determined by the state value are [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12]. (State windows are not yet supported on super tables.)
|
||||
An integer (or Boolean) or string value identifies the state of the device at the time a record was produced. Records with the same state value belong to the same state window, and the window closes when the value changes. As shown in the figure below, the state windows determined by the state value are [2019-04-28 14:22:07, 2019-04-28 14:22:10] and [2019-04-28 14:22:11, 2019-04-28 14:22:12].
|
||||
|
||||

|
||||
|
||||
|
@ -122,7 +119,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
|
|||
|
||||

|
||||
|
||||
Results within the tol_val time interval are considered to belong to the same window; if the time between two consecutive records exceeds tol_val, the next window starts automatically. (Session windows are not yet supported on super tables.)
|
||||
Results within the tol_val time interval are considered to belong to the same window; if the time between two consecutive records exceeds tol_val, the next window starts automatically (see the sketch below).
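A hedged sketch of a session-window query (temp_tb_1 as in the other examples here; the 12-second tolerance is an assumed value):

```sql
SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, 12s);
```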
|
||||
|
||||
```
|
||||
|
||||
|
|
|
@ -9,14 +9,51 @@ description: 企业版中才具有的权限管理功能
|
|||
## Creating a User
|
||||
|
||||
```sql
|
||||
CREATE USER user_name PASS 'password';
|
||||
CREATE USER user_name PASS 'password' [SYSINFO {1|0}];
|
||||
```
|
||||
|
||||
Creates a user.
|
||||
|
||||
user_name can be up to 23 bytes long.
|
||||
|
||||
password can be up to 128 bytes long. Valid characters include "a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/"; single and double quotes, apostrophes, backslashes, and spaces are not allowed, and the password may not be empty.
|
||||
|
||||
SYSINFO indicates whether the user can view system information: 1 means the user can view it, 0 means they cannot. System information includes server configuration, information about the various server nodes (such as DNODE and QNODE), storage-related information, and so on. By default, users can view system information.
|
||||
|
||||
For example, create a user test with password 123456 who can view system information:
|
||||
|
||||
```sql
|
||||
taos> create user test pass '123456' sysinfo 1;
|
||||
Query OK, 0 of 0 rows affected (0.001254s)
|
||||
```
|
||||
|
||||
## Viewing Users
|
||||
|
||||
```sql
|
||||
SHOW USERS;
|
||||
```
|
||||
|
||||
Views user information.
|
||||
|
||||
```sql
|
||||
taos> show users;
|
||||
name | super | enable | sysinfo | create_time |
|
||||
================================================================================
|
||||
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
|
||||
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
|
||||
Query OK, 2 rows in database (0.001657s)
|
||||
```
|
||||
|
||||
You can also view user information by querying the INFORMATION_SCHEMA.INS_USERS system table, for example:
|
||||
|
||||
```sql
|
||||
taos> select * from information_schema.ins_users;
|
||||
name | super | enable | sysinfo | create_time |
|
||||
================================================================================
|
||||
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
|
||||
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
|
||||
Query OK, 2 rows in database (0.001953s)
|
||||
```
|
||||
|
||||
## Deleting a User
|
||||
|
||||
|
@ -37,9 +74,15 @@ alter_user_clause: {
|
|||
```
|
||||
|
||||
- PASS: changes the user's password.
|
||||
- ENABLE: enables or disables the user. 1 enables the user, 0 disables the user.
|
||||
- SYSINFO: sets whether the user can view system information. 1 means the user can view system information, 0 means they cannot.
|
||||
|
||||
For example, disable the user test:
|
||||
|
||||
```sql
|
||||
taos> alter user test enable 0;
|
||||
Query OK, 0 of 0 rows affected (0.001160s)
|
||||
```
|
||||
|
||||
## Granting Privileges
|
||||
|
||||
|
@ -62,7 +105,7 @@ priv_level : {
|
|||
}
|
||||
```
|
||||
|
||||
Grants privileges to a user. The privilege management feature is available only in the enterprise edition.
|
||||
|
||||
Privileges can be granted down to the DATABASE level, and there are two kinds of privileges: READ and WRITE.
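For instance, a hedged sketch granting read access (the database name db1 is hypothetical; test is the user created above):

```sql
GRANT READ ON db1.* TO test;
```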
|
||||
|
||||
|
@ -92,4 +135,4 @@ priv_level : {
|
|||
|
||||
```
|
||||
|
||||
Revokes privileges from a user. The privilege management feature is available only in the enterprise edition.
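A matching hedged sketch revoking the privilege granted above (db1 remains hypothetical):

```sql
REVOKE READ ON db1.* FROM test;
```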
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
---
|
||||
title: TAOS SQL
|
||||
description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容"
|
||||
title: TDengine SQL
|
||||
description: 'Syntax rules supported by TDengine SQL, the main query features, supported SQL query functions, and common tips'
|
||||
---
|
||||
|
||||
This document describes the syntax rules supported by TAOS SQL, the main query features, the supported SQL query functions, and common tips. Reading it requires basic knowledge of SQL. TDengine 3.0 brings a great number of improvements and optimizations over 2.x; in particular, the query engine has been completely rewritten, so the SQL syntax has changed considerably from 2.x. For details, see the [3.0 syntax changes](/taos-sql/changes) section.
|
||||
This document describes the syntax rules supported by TDengine SQL, the main query features, the supported SQL query functions, and common tips. Reading it requires basic knowledge of SQL. TDengine 3.0 brings a great number of improvements and optimizations over 2.x; in particular, the query engine has been completely rewritten, so the SQL syntax has changed considerably from 2.x. For details, see the [3.0 syntax changes](/taos-sql/changes) section.
|
||||
|
||||
TAOS SQL is the primary tool for writing data into and querying data from TDengine. TAOS SQL provides standard SQL syntax, optimized and extended with many syntaxes and features to suit the characteristics of time-series data and workloads. The maximum length of a TAOS SQL statement is 1 MB. TAOS SQL does not support keyword abbreviations; for example, DELETE cannot be abbreviated as DEL.
|
||||
TDengine SQL is the primary tool for writing data into and querying data from TDengine. TDengine SQL provides standard SQL syntax, optimized and extended with many syntaxes and features to suit the characteristics of time-series data and workloads. The maximum length of a TDengine SQL statement is 1 MB. TDengine SQL does not support keyword abbreviations; for example, DELETE cannot be abbreviated as DEL.
|
||||
|
||||
The SQL syntax in this chapter follows these conventions:
|
||||
|
||||
|
|
|
@ -119,7 +119,7 @@ taos -h tdengine -P 6030
|
|||
FROM ubuntu:20.04
|
||||
RUN apt-get update && apt-get install -y wget
|
||||
ENV TDENGINE_VERSION=3.0.0.0
|
||||
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
RUN wget -c https://www.tdengine.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& cd TDengine-client-${TDENGINE_VERSION} \
|
||||
&& ./install_client.sh \
|
||||
|
@ -234,7 +234,7 @@ go mod tidy
|
|||
```dockerfile
|
||||
FROM golang:1.19.0-buster as builder
|
||||
ENV TDENGINE_VERSION=3.0.0.0
|
||||
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
RUN wget -c https://www.tdengine.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& cd TDengine-client-${TDENGINE_VERSION} \
|
||||
&& ./install_client.sh \
|
||||
|
@ -250,7 +250,7 @@ RUN go build
|
|||
FROM ubuntu:20.04
|
||||
RUN apt-get update && apt-get install -y wget
|
||||
ENV TDENGINE_VERSION=3.0.0.0
|
||||
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
RUN wget -c https://www.tdengine.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
|
||||
&& cd TDengine-client-${TDENGINE_VERSION} \
|
||||
&& ./install_client.sh \
|
||||
|
|
|
@ -3,7 +3,7 @@ title: Schemaless 写入
|
|||
description: 'Schemaless writing removes the need to create super tables and subtables in advance; the write interface automatically creates the storage structures matching the data as it is written'
|
||||
---
|
||||
|
||||
In IoT applications, many data items are often collected for intelligent control, business analytics, device monitoring, and so on. Because of application logic upgrades or hardware adjustments to the devices themselves, the set of collected data items can change fairly frequently. To make data recording convenient in such situations, TDengine provides schemaless writing, which removes the need to create super tables and subtables in advance; the write interface automatically creates the storage structures matching the data as it is written. When necessary, schemaless writing
|
||||
also adds the necessary data columns automatically, ensuring that the data written by the user is stored correctly.
|
||||
|
||||
Super tables and subtables created through schemaless writing are no different from those created directly with SQL, and you can also write data into them directly with SQL statements. Note that the names of tables created through schemaless writing are generated from tag values according to fixed mapping rules, so they carry no explicit meaning and lack readability.
|
||||
|
@ -36,14 +36,14 @@ tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要
|
|||
- Spaces, equals signs (=), commas (,), and double quotes (") must be escaped with a preceding backslash (\). (All of these refer to half-width ASCII characters.)
|
||||
- Numeric types are distinguished by suffix:
|
||||
|
||||
| **No.** | **Suffix**  | **Mapped Type**               | **Size (bytes)** |
|
||||
| ------- | ----------- | ----------------------------- | ---------------- |
|
||||
| 1       | none or f64 | double                        | 8                |
|
||||
| 2       | f32         | float                         | 4                |
|
||||
| 3       | i8/u8       | TinyInt/UTinyInt              | 1                |
|
||||
| 4       | i16/u16     | SmallInt/USmallInt            | 2                |
|
||||
| 5       | i32/u32     | Int/UInt                      | 4                |
|
||||
| 6       | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8                |
|
||||
|
||||
- t, T, true, True, TRUE, f, F, false, and False are treated directly as BOOL values.
|
||||
|
||||
|
@ -69,7 +69,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
|
|||
|
||||
Note that tag_key1 and tag_key2 here are not in the original order the user entered the tags, but the tag names sorted in ascending string order. So tag_key1 is not the first tag entered in the line protocol.
|
||||
After sorting, the MD5 hash value "md5_val" of that string is computed, and the table name is generated by combining the result with a fixed string: "t_md5_val". The "t_" prefix is fixed; every table automatically generated through this mapping has this prefix.
|
||||
To let users specify the generated table name, the smlChildTableName configuration parameter can be set. For example, with smlChildTableName=tname configured, inserting the data st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set specified when the table was first automatically created is used and the others are ignored.
|
||||
|
||||
2. If the super table obtained by parsing the line protocol does not exist, it is created (manually creating super tables is not recommended, otherwise data insertion may behave abnormally).
|
||||
3. If the subtable obtained by parsing the line protocol does not exist, schemaless writing creates it using the subtable name determined in step 1 or 2.
|
||||
|
@ -78,11 +78,11 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
|
|||
NULL.
|
||||
6. For BINARY or NCHAR columns, if the length of a value in a data row exceeds the column type's limit, the maximum character length the column can store is increased automatically (it only grows, never shrinks) so that the data is stored in full.
|
||||
7. Any error encountered during processing interrupts the write process and returns an error code.
|
||||
8. To improve write efficiency, it is assumed by default that the field_set order is the same across rows of the same super table (the first row contains all fields, and subsequent rows follow that order). If the order differs, the smlDataFormat parameter must be set to false; otherwise the data is written as if the order were the same, and the data in the database will be corrupted.
|
||||
|
||||
:::tip
|
||||
All schemaless processing logic still follows TDengine's underlying limits on data structures; for example, the total length of each data row cannot exceed
|
||||
16 KB. For the specific limits, see [TAOS SQL Boundary Limits](/taos-sql/limit).
|
||||
16 KB. For the specific limits, see [TDengine SQL Boundary Limits](/taos-sql/limit).
|
||||
|
||||
:::
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
sidebar_label: taosKeeper
|
||||
title: taosKeeper
|
||||
description: TDengine taosKeeper 使用说明
|
||||
description: The monitoring-metrics exporter for TDengine 3.0
|
||||
---
|
||||
|
||||
## Introduction
|
||||
|
@ -22,26 +22,36 @@ taosKeeper 安装方式:
|
|||
|
||||
### Configuration and Running
|
||||
|
||||
<!-- taosKeeper 需要在操作系统终端执行,该工具支持两种配置方式:[命令行参数](#命令行参数启动) 和 [配置文件](#配置文件启动)。命令行参数优先级高于配置文件参数。-->
|
||||
taosKeeper must be run from an operating system terminal. It supports [starting with a configuration file](#starting-with-a-configuration-file).
|
||||
taosKeeper must be run from an operating system terminal. It supports three configuration methods: [command-line parameters](#starting-with-command-line-parameters), [environment variables](#starting-with-environment-variables), and a [configuration file](#starting-with-a-configuration-file). The precedence is: command-line parameters first, then environment variables, then configuration file parameters.
|
||||
|
||||
**Before running taosKeeper, make sure the TDengine cluster and taosAdapter are already running correctly.** TDengine must also have the monitoring service enabled; for details, see [TDengine monitoring configuration](../config/#监控相关).
|
||||
|
||||
<!--
|
||||
### Starting with Command-Line Parameters
|
||||
|
||||
Run taosKeeper with command-line parameters to control its behavior.
|
||||
|
||||
```shell
|
||||
$ taosKeeper
|
||||
```
|
||||
-->
|
||||
|
||||
### Starting with Environment Variables
|
||||
|
||||
Startup parameters can be controlled through environment variables, typically when running in a container.
|
||||
|
||||
```shell
|
||||
$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
|
||||
|
||||
$ taoskeeper
|
||||
```
|
||||
|
||||
For the full list of parameters, see the output of `taoskeeper -h`.
|
||||
|
||||
### Starting with a Configuration File
|
||||
|
||||
Run the following command to try out taosKeeper quickly. When no configuration file is specified, `/etc/taos/keeper.toml` is used by default if present; otherwise the built-in default configuration is used.
|
||||
|
||||
```shell
|
||||
$ taoskeeper -c <keeper config file>
|
||||
```
|
||||
|
||||
**Below is an example configuration file:**
|
||||
|
@ -110,7 +120,7 @@ Query OK, 1 rows in database (0.036162s)
|
|||
#### Exporting Monitoring Metrics
|
||||
|
||||
```shell
|
||||
$ curl http://127.0.0.1:6043/metrics
|
||||
```
|
||||
|
||||
Partial result set:
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
---
|
||||
sidebar_label: Google Data Studio
|
||||
title: TDengine Google Data Studio Connector
|
||||
description: A detailed guide to accessing TDengine data from Google Data Studio
|
||||
---
|
||||
|
||||
Google Data Studio is a powerful reporting and visualization tool that provides a rich set of charts and data connections, making it very easy to generate reports from predefined templates. Thanks to its ease of use and rich ecosystem, it has won the favor of many data scientists in the data analysis field.
|
||||
|
||||
Data Studio supports many data sources. Besides Google's own services such as Google Analytics, Google AdWords, Search Console, and BigQuery, users can also upload offline files directly to Google Cloud Storage, or connect other data sources through connectors.
|
||||
|
||||

|
||||
|
||||
The TDengine connector has been published to the Google Data Studio marketplace, so you can search for TDengine directly on the "Connect to Data" page and select it as a data source.
|
||||
|
||||

|
||||
|
||||
Next, click the AUTHORIZE button.
|
||||
|
||||

|
||||
|
||||
Allow your account to be connected to the external service.
|
||||
|
||||

|
||||
|
||||
On the next page, enter the URL of the running TDengine REST service, the username, password, database name, table name, and query time range, then click the CONNECT button in the upper right corner.
|
||||
|
||||

|
||||
|
||||
Once connected, you can use GDS to process data and create reports with ease.
|
||||
|
||||

|
||||
|
||||
The current rule for dimensions and metrics is: fields of the timestamp type and tag fields are defined by the connector as dimensions, while fields of other types are metrics. Users can also create different tables to suit their own needs.
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
After Width: | Height: | Size: 17 KiB |
After Width: | Height: | Size: 17 KiB |
After Width: | Height: | Size: 14 KiB |
After Width: | Height: | Size: 16 KiB |
After Width: | Height: | Size: 4.6 KiB |
After Width: | Height: | Size: 5.9 KiB |
After Width: | Height: | Size: 14 KiB |
After Width: | Height: | Size: 14 KiB |
After Width: | Height: | Size: 14 KiB |
After Width: | Height: | Size: 14 KiB |
After Width: | Height: | Size: 12 KiB |
|
@ -288,7 +288,7 @@ TDengine 对每个数据采集点单独建表,但在实际应用中经常需
|
|||
7. Each vnode returns its local query computation results;
|
||||
8. After completing multi-node data aggregation, the qnode returns the final query result to the client;
|
||||
|
||||
Because TDengine stores tag data separately from time-series data inside each vnode, filtering the tag data in memory first determines the set of tables that must take part in the aggregation, which greatly reduces the data set to scan and dramatically speeds up aggregation. Meanwhile, because the data is distributed across multiple vnodes/dnodes, the aggregation runs concurrently in those vnodes, speeding it up further. Aggregate functions and most other operations on ordinary tables also work on super tables with exactly the same syntax; see TAOS SQL for details.
|
||||
Because TDengine stores tag data separately from time-series data inside each vnode, filtering the tag data in memory first determines the set of tables that must take part in the aggregation, which greatly reduces the data set to scan and dramatically speeds up aggregation. Meanwhile, because the data is distributed across multiple vnodes/dnodes, the aggregation runs concurrently in those vnodes, speeding it up further. Aggregate functions and most other operations on ordinary tables also work on super tables with exactly the same syntax; see TDengine SQL for details (a hedged example follows).
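As a hedged sketch of such a super table aggregation (the meters super table with its current column and location tag follows the usual example schema in this documentation; the tag value is assumed):

```sql
SELECT AVG(current) FROM meters WHERE location = 'California.SanFrancisco';
```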
|
||||
|
||||
### Precomputation
|
||||
|
||||
|
|
|
@ -13,15 +13,9 @@ IF (TD_LINUX)
|
|||
#TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)
|
||||
|
||||
add_executable(tmq "")
|
||||
add_executable(tmq_taosx "")
|
||||
add_executable(stream_demo "")
|
||||
add_executable(demoapi "")
|
||||
|
||||
target_sources(tmq_taosx
|
||||
PRIVATE
|
||||
"tmq_taosx.c"
|
||||
)
|
||||
|
||||
target_sources(tmq
|
||||
PRIVATE
|
||||
"tmq.c"
|
||||
|
@ -41,10 +35,6 @@ IF (TD_LINUX)
|
|||
taos_static
|
||||
)
|
||||
|
||||
target_link_libraries(tmq_taosx
|
||||
taos_static
|
||||
)
|
||||
|
||||
target_link_libraries(stream_demo
|
||||
taos_static
|
||||
)
|
||||
|
@ -57,10 +47,6 @@ IF (TD_LINUX)
|
|||
PUBLIC "${TD_SOURCE_DIR}/include/os"
|
||||
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
|
||||
)
|
||||
target_include_directories(tmq_taosx
|
||||
PUBLIC "${TD_SOURCE_DIR}/include/os"
|
||||
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
|
||||
)
|
||||
|
||||
target_include_directories(stream_demo
|
||||
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
|
||||
|
@ -73,7 +59,6 @@ IF (TD_LINUX)
|
|||
)
|
||||
|
||||
SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
|
||||
SET_TARGET_PROPERTIES(tmq_taosx PROPERTIES OUTPUT_NAME tmq_taosx)
|
||||
SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo)
|
||||
SET_TARGET_PROPERTIES(demoapi PROPERTIES OUTPUT_NAME demoapi)
|
||||
ENDIF ()
|
||||
|
|
|
@ -1,489 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <time.h>
|
||||
#include "taos.h"
|
||||
|
||||
static int running = 1;
|
||||
|
||||
static TAOS* use_db(){
|
||||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
if (pConn == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
TAOS_RES* pRes = taos_query(pConn, "use db_taosx");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in use db_taosx, reason:%s\n", taos_errstr(pRes));
|
||||
return NULL;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
return pConn;
|
||||
}
|
||||
|
||||
static void msg_process(TAOS_RES* msg) {
|
||||
/*memset(buf, 0, 1024);*/
|
||||
printf("-----------topic-------------: %s\n", tmq_get_topic_name(msg));
|
||||
printf("db: %s\n", tmq_get_db_name(msg));
|
||||
printf("vg: %d\n", tmq_get_vgroup_id(msg));
|
||||
TAOS *pConn = use_db();
|
||||
if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
|
||||
char* result = tmq_get_json_meta(msg);
|
||||
if (result) {
|
||||
printf("meta result: %s\n", result);
|
||||
}
|
||||
tmq_free_json_meta(result);
|
||||
}
|
||||
|
||||
tmq_raw_data raw = {0};
|
||||
tmq_get_raw(msg, &raw);
|
||||
int32_t ret = tmq_write_raw(pConn, raw);
|
||||
printf("write raw data: %s\n", tmq_err2str(ret));
|
||||
|
||||
// else{
|
||||
// while(1){
|
||||
// int numOfRows = 0;
|
||||
// void *pData = NULL;
|
||||
// taos_fetch_raw_block(msg, &numOfRows, &pData);
|
||||
// if(numOfRows == 0) break;
|
||||
// printf("write data: tbname:%s, numOfRows:%d\n", tmq_get_table_name(msg), numOfRows);
|
||||
// int ret = taos_write_raw_block(pConn, numOfRows, pData, tmq_get_table_name(msg));
|
||||
// printf("write raw data: %s\n", tmq_err2str(ret));
|
||||
// }
|
||||
// }
|
||||
|
||||
taos_close(pConn);
|
||||
}
|
||||
|
||||
int32_t init_env() {
|
||||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
if (pConn == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create database if not exists db_taosx vgroups 4");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "drop database if exists abc1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in drop db, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create database if not exists abc1 vgroups 3");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in create db, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "use abc1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in use db, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn,
|
||||
"create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
|
||||
"nchar(8), t4 bool)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000, \"ttt\", true)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "insert into ct0 values(1626006833600, 1, 2, 'a')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table if not exists ct1 using st1(t1) tags(2000)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create child table ct1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table if not exists ct2 using st1(t1) tags(NULL)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create child table ct2, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "insert into ct1 values(1626006833600, 3, 4, 'b')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table if not exists ct3 using st1(t1) tags(3000)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create child table ct3, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table if not exists ct4 using st1(t3) tags('ct4')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create child table ct4, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833602, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "alter table st1 add column c4 bigint");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "alter table st1 modify column c3 binary(64)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "insert into ct3 values(1626006833605, 53, 63, 'cffffffffffffffffffffffffffff', 8989898899999) (1626006833609, 51, 62, 'c333', 940)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "insert into ct3 select * from ct1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "alter table st1 add tag t2 binary(64)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "alter table ct3 set tag t1=5000");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to slter child table ct3, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "delete from abc1 .ct3 where ts < 1626006833606");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "drop table ct3 ct1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "drop table st1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "alter table n1 add column c3 bigint");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "alter table n1 modify column c2 nchar(8)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "alter table n1 rename column c3 cc3");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "alter table n1 comment 'hello'");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "alter table n1 drop column c1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "insert into n1 values(now, 'eeee', 8989898899999) (now+9s, 'c333', 940)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to insert into n1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "drop table n1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table jt1 using jt tags('{\"k1\":1, \"k2\":\"hello\"}')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create table jt2 using jt tags('')");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table jt2, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn,
|
||||
"create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
|
||||
"nchar(8), t4 bool)");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "drop table st1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
taos_close(pConn);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int32_t create_topic() {
|
||||
printf("create topic\n");
|
||||
TAOS_RES* pRes;
|
||||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
if (pConn == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
pRes = taos_query(pConn, "use abc1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in use db, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
|
||||
return -1;
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
|
||||
taos_close(pConn);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
|
||||
printf("commit %d tmq %p param %p\n", code, tmq, param);
|
||||
}
|
||||
|
||||
tmq_t* build_consumer() {
|
||||
#if 0
|
||||
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
|
||||
assert(pConn != NULL);
|
||||
|
||||
TAOS_RES* pRes = taos_query(pConn, "use abc1");
|
||||
if (taos_errno(pRes) != 0) {
|
||||
printf("error in use db, reason:%s\n", taos_errstr(pRes));
|
||||
}
|
||||
taos_free_result(pRes);
|
||||
#endif
|
||||
|
||||
tmq_conf_t* conf = tmq_conf_new();
|
||||
tmq_conf_set(conf, "group.id", "tg2");
|
||||
tmq_conf_set(conf, "client.id", "my app 1");
|
||||
tmq_conf_set(conf, "td.connect.user", "root");
|
||||
tmq_conf_set(conf, "td.connect.pass", "taosdata");
|
||||
tmq_conf_set(conf, "msg.with.table.name", "true");
|
||||
tmq_conf_set(conf, "enable.auto.commit", "true");
|
||||
tmq_conf_set(conf, "experimental.snapshot.enable", "true");
|
||||
|
||||
|
||||
/*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/
|
||||
|
||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
|
||||
assert(tmq);
|
||||
tmq_conf_destroy(conf);
|
||||
return tmq;
|
||||
}
|
||||
|
||||
tmq_list_t* build_topic_list() {
|
||||
tmq_list_t* topic_list = tmq_list_new();
|
||||
tmq_list_append(topic_list, "topic_ctb_column");
|
||||
/*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/
|
||||
return topic_list;
|
||||
}
|
||||
|
||||
void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
|
||||
int32_t code;
|
||||
|
||||
if ((code = tmq_subscribe(tmq, topics))) {
|
||||
fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
|
||||
printf("subscribe err\n");
|
||||
return;
|
||||
}
|
||||
int32_t cnt = 0;
|
||||
while (running) {
|
||||
TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
|
||||
if (tmqmessage) {
|
||||
cnt++;
|
||||
msg_process(tmqmessage);
|
||||
/*if (cnt >= 2) break;*/
|
||||
/*printf("get data\n");*/
|
||||
taos_free_result(tmqmessage);
|
||||
/*} else {*/
|
||||
/*break;*/
|
||||
/*tmq_commit_sync(tmq, NULL);*/
|
||||
}
|
||||
}
|
||||
|
||||
code = tmq_consumer_close(tmq);
|
||||
if (code)
|
||||
fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
|
||||
else
|
||||
fprintf(stderr, "%% Consumer closed\n");
|
||||
}
|
||||
|
||||
void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
|
||||
static const int MIN_COMMIT_COUNT = 1;
|
||||
|
||||
int msg_count = 0;
|
||||
int32_t code;
|
||||
|
||||
if ((code = tmq_subscribe(tmq, topics))) {
|
||||
fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
|
||||
return;
|
||||
}
|
||||
|
||||
tmq_list_t* subList = NULL;
|
||||
tmq_subscription(tmq, &subList);
|
||||
char** subTopics = tmq_list_to_c_array(subList);
|
||||
int32_t sz = tmq_list_get_size(subList);
|
||||
printf("subscribed topics: ");
|
||||
for (int32_t i = 0; i < sz; i++) {
|
||||
printf("%s, ", subTopics[i]);
|
||||
}
|
||||
printf("\n");
|
||||
tmq_list_destroy(subList);
|
||||
|
||||
while (running) {
|
||||
TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
|
||||
if (tmqmessage) {
|
||||
msg_process(tmqmessage);
|
||||
taos_free_result(tmqmessage);
|
||||
|
||||
/*tmq_commit_sync(tmq, NULL);*/
|
||||
/*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/
|
||||
}
|
||||
}
|
||||
|
||||
code = tmq_consumer_close(tmq);
|
||||
if (code)
|
||||
fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
|
||||
else
|
||||
fprintf(stderr, "%% Consumer closed\n");
|
||||
}
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
printf("env init\n");
|
||||
if (init_env() < 0) {
|
||||
return -1;
|
||||
}
|
||||
create_topic();
|
||||
|
||||
tmq_t* tmq = build_consumer();
|
||||
tmq_list_t* topic_list = build_topic_list();
|
||||
basic_consume_loop(tmq, topic_list);
|
||||
/*sync_consume_loop(tmq, topic_list);*/
|
||||
}
|
|
@ -254,6 +254,7 @@ enum tmq_res_t {
|
|||
TMQ_RES_INVALID = -1,
|
||||
TMQ_RES_DATA = 1,
|
||||
TMQ_RES_TABLE_META = 2,
|
||||
TMQ_RES_TAOSX = 3,
|
||||
};
|
||||
|
||||
typedef struct tmq_raw_data {
|
||||
|
|
|
@ -73,6 +73,7 @@ enum {
|
|||
TMQ_MSG_TYPE__POLL_RSP,
|
||||
TMQ_MSG_TYPE__POLL_META_RSP,
|
||||
TMQ_MSG_TYPE__EP_RSP,
|
||||
TMQ_MSG_TYPE__TAOSX_RSP,
|
||||
TMQ_MSG_TYPE__END_RSP,
|
||||
};
|
||||
|
||||
|
@ -129,7 +130,6 @@ typedef struct SDataBlockInfo {
|
|||
uint32_t capacity;
|
||||
// TODO: optimize and remove following
|
||||
int64_t version; // used for stream, and need serialization
|
||||
int64_t ts; // used for stream, and need serialization
|
||||
int32_t childId; // used for stream, do not serialize
|
||||
EStreamType type; // used for stream, do not serialize
|
||||
STimeWindow calWin; // used for stream, do not serialize
|
||||
|
|
|
@ -144,8 +144,8 @@ void taosCfgDynamicOptions(const char *option, const char *value);
|
|||
|
||||
struct SConfig *taosGetCfg();
|
||||
|
||||
void taosSetAllDebugFlag(int32_t flag);
|
||||
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal);
|
||||
void taosSetAllDebugFlag(int32_t flag, bool rewrite);
|
||||
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
|
||||
int32_t taosSetCfg(SConfig *pCfg, char *name);
|
||||
void taosLocalCfgForbiddenToChange(char* name, bool* forbidden);
|
||||
|
||||
|
|
|
@ -276,7 +276,6 @@ struct SSchema {
|
|||
char name[TSDB_COL_NAME_LEN];
|
||||
};
|
||||
|
||||
|
||||
typedef struct {
|
||||
char tbName[TSDB_TABLE_NAME_LEN];
|
||||
char stbName[TSDB_TABLE_NAME_LEN];
|
||||
|
@ -295,17 +294,15 @@ typedef struct {
|
|||
SSchema* pSchemas;
|
||||
} STableMetaRsp;
|
||||
|
||||
|
||||
|
||||
typedef struct {
|
||||
int32_t code;
|
||||
int8_t hashMeta;
|
||||
int64_t uid;
|
||||
char* tblFName;
|
||||
int32_t numOfRows;
|
||||
int32_t affectedRows;
|
||||
int64_t sver;
|
||||
STableMetaRsp* pMeta;
|
||||
int32_t code;
|
||||
int8_t hashMeta;
|
||||
int64_t uid;
|
||||
char* tblFName;
|
||||
int32_t numOfRows;
|
||||
int32_t affectedRows;
|
||||
int64_t sver;
|
||||
STableMetaRsp* pMeta;
|
||||
} SSubmitBlkRsp;
|
||||
|
||||
typedef struct {
|
||||
|
@ -320,7 +317,7 @@ typedef struct {
|
|||
|
||||
int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp);
|
||||
int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp);
|
||||
void tFreeSSubmitBlkRsp(void* param);
|
||||
void tFreeSSubmitBlkRsp(void* param);
|
||||
void tFreeSSubmitRsp(SSubmitRsp* pRsp);
|
||||
|
||||
#define COL_SMA_ON ((int8_t)0x1)
|
||||
|
@ -2049,8 +2046,8 @@ typedef struct {
|
|||
STableMetaRsp* pMeta;
|
||||
} SVCreateTbRsp, SVUpdateTbRsp;
|
||||
|
||||
int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp);
|
||||
int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp);
|
||||
int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp);
|
||||
int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp);
|
||||
void tFreeSVCreateTbRsp(void* param);
|
||||
|
||||
int32_t tSerializeSVCreateTbReq(void** buf, SVCreateTbReq* pReq);
|
||||
|
@ -2961,6 +2958,25 @@ typedef struct {
|
|||
int32_t tEncodeSMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp);
|
||||
int32_t tDecodeSMqDataRsp(SDecoder* pDecoder, SMqDataRsp* pRsp);
|
||||
|
||||
typedef struct {
|
||||
SMqRspHead head;
|
||||
STqOffsetVal reqOffset;
|
||||
STqOffsetVal rspOffset;
|
||||
int32_t blockNum;
|
||||
int8_t withTbName;
|
||||
int8_t withSchema;
|
||||
SArray* blockDataLen;
|
||||
SArray* blockData;
|
||||
SArray* blockTbName;
|
||||
SArray* blockSchema;
|
||||
int32_t createTableNum;
|
||||
SArray* createTableLen;
|
||||
SArray* createTableReq;
|
||||
} STaosxRsp;
|
||||
|
||||
int32_t tEncodeSTaosxRsp(SEncoder* pEncoder, const STaosxRsp* pRsp);
|
||||
int32_t tDecodeSTaosxRsp(SDecoder* pDecoder, STaosxRsp* pRsp);
|
||||
|
||||
typedef struct {
|
||||
SMqRspHead head;
|
||||
char cgroup[TSDB_CGROUP_LEN];
|
||||
|
|
|
@ -92,6 +92,8 @@ struct SResultRowEntryInfo;
|
|||
//for selectivity query, the corresponding tag value is assigned if the data is qualified
|
||||
typedef struct SSubsidiaryResInfo {
|
||||
int16_t num;
|
||||
int32_t rowLen;
|
||||
char* buf; // serialize data buffer
|
||||
struct SqlFunctionCtx **pCtx;
|
||||
} SSubsidiaryResInfo;
|
||||
|
||||
|
@ -118,6 +120,11 @@ typedef struct SInputColumnInfoData {
|
|||
uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions.
|
||||
} SInputColumnInfoData;
|
||||
|
||||
typedef struct SSerializeDataHandle {
|
||||
struct SDiskbasedBuf* pBuf;
|
||||
int32_t currentPage;
|
||||
} SSerializeDataHandle;
|
||||
|
||||
// sql function runtime context
|
||||
typedef struct SqlFunctionCtx {
|
||||
SInputColumnInfoData input;
|
||||
|
@ -137,10 +144,9 @@ typedef struct SqlFunctionCtx {
|
|||
SFuncExecFuncs fpSet;
|
||||
SScalarFuncExecFuncs sfp;
|
||||
struct SExprInfo *pExpr;
|
||||
struct SDiskbasedBuf *pBuf;
|
||||
struct SSDataBlock *pSrcBlock;
|
||||
struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity
|
||||
int32_t curBufPage;
|
||||
SSerializeDataHandle saveHandle;
|
||||
bool isStream;
|
||||
|
||||
char udfName[TSDB_FUNC_NAME_LEN];
|
||||
|
|
|
@ -1,74 +1,74 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef _TD_OS_SEMPHONE_H_
|
||||
#define _TD_OS_SEMPHONE_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <semaphore.h>
|
||||
|
||||
#if defined(_TD_DARWIN_64)
|
||||
#include <dispatch/dispatch.h>
|
||||
// typedef struct tsem_s *tsem_t;
|
||||
typedef dispatch_semaphore_t tsem_t;
|
||||
|
||||
int tsem_init(tsem_t *sem, int pshared, unsigned int value);
|
||||
int tsem_wait(tsem_t *sem);
|
||||
int tsem_timewait(tsem_t *sim, int64_t nanosecs);
|
||||
int tsem_post(tsem_t *sem);
|
||||
int tsem_destroy(tsem_t *sem);
|
||||
|
||||
#else
|
||||
|
||||
#define tsem_t sem_t
|
||||
#define tsem_init sem_init
|
||||
int tsem_wait(tsem_t *sem);
|
||||
int tsem_timewait(tsem_t *sim, int64_t nanosecs);
|
||||
#define tsem_post sem_post
|
||||
#define tsem_destroy sem_destroy
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(_TD_DARWIN_64)
|
||||
// #define TdThreadRwlock TdThreadMutex
|
||||
// #define taosThreadRwlockInit(lock, NULL) taosThreadMutexInit(lock, NULL)
|
||||
// #define taosThreadRwlockDestroy(lock) taosThreadMutexDestroy(lock)
|
||||
// #define taosThreadRwlockWrlock(lock) taosThreadMutexLock(lock)
|
||||
// #define taosThreadRwlockRdlock(lock) taosThreadMutexLock(lock)
|
||||
// #define taosThreadRwlockUnlock(lock) taosThreadMutexUnlock(lock)
|
||||
|
||||
// #define TdThreadSpinlock TdThreadMutex
|
||||
// #define taosThreadSpinInit(lock, NULL) taosThreadMutexInit(lock, NULL)
|
||||
// #define taosThreadSpinDestroy(lock) taosThreadMutexDestroy(lock)
|
||||
// #define taosThreadSpinLock(lock) taosThreadMutexLock(lock)
|
||||
// #define taosThreadSpinUnlock(lock) taosThreadMutexUnlock(lock)
|
||||
#endif
|
||||
|
||||
bool taosCheckPthreadValid(TdThread thread);
|
||||
int64_t taosGetSelfPthreadId();
|
||||
int64_t taosGetPthreadId(TdThread thread);
|
||||
void taosResetPthread(TdThread *thread);
|
||||
bool taosComparePthread(TdThread first, TdThread second);
|
||||
int32_t taosGetPId();
|
||||
int32_t taosGetAppName(char *name, int32_t *len);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /*_TD_OS_SEMPHONE_H_*/
|
||||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef _TD_OS_SEMPHONE_H_
|
||||
#define _TD_OS_SEMPHONE_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <semaphore.h>
|
||||
|
||||
#if defined(_TD_DARWIN_64)
|
||||
#include <dispatch/dispatch.h>
|
||||
// typedef struct tsem_s *tsem_t;
|
||||
typedef dispatch_semaphore_t tsem_t;
|
||||
|
||||
int tsem_init(tsem_t *sem, int pshared, unsigned int value);
|
||||
int tsem_wait(tsem_t *sem);
|
||||
int tsem_timewait(tsem_t *sim, int64_t nanosecs);
|
||||
int tsem_post(tsem_t *sem);
|
||||
int tsem_destroy(tsem_t *sem);
|
||||
|
||||
#else
|
||||
|
||||
#define tsem_t sem_t
|
||||
#define tsem_init sem_init
|
||||
int tsem_wait(tsem_t *sem);
|
||||
int tsem_timewait(tsem_t *sim, int64_t nanosecs);
|
||||
#define tsem_post sem_post
|
||||
#define tsem_destroy sem_destroy
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(_TD_DARWIN_64)
|
||||
// #define TdThreadRwlock TdThreadMutex
|
||||
// #define taosThreadRwlockInit(lock, NULL) taosThreadMutexInit(lock, NULL)
|
||||
// #define taosThreadRwlockDestroy(lock) taosThreadMutexDestroy(lock)
|
||||
// #define taosThreadRwlockWrlock(lock) taosThreadMutexLock(lock)
|
||||
// #define taosThreadRwlockRdlock(lock) taosThreadMutexLock(lock)
|
||||
// #define taosThreadRwlockUnlock(lock) taosThreadMutexUnlock(lock)
|
||||
|
||||
// #define TdThreadSpinlock TdThreadMutex
|
||||
// #define taosThreadSpinInit(lock, NULL) taosThreadMutexInit(lock, NULL)
|
||||
// #define taosThreadSpinDestroy(lock) taosThreadMutexDestroy(lock)
|
||||
// #define taosThreadSpinLock(lock) taosThreadMutexLock(lock)
|
||||
// #define taosThreadSpinUnlock(lock) taosThreadMutexUnlock(lock)
|
||||
#endif
|
||||
|
||||
bool taosCheckPthreadValid(TdThread thread);
|
||||
int64_t taosGetSelfPthreadId();
|
||||
int64_t taosGetPthreadId(TdThread thread);
|
||||
void taosResetPthread(TdThread *thread);
|
||||
bool taosComparePthread(TdThread first, TdThread second);
|
||||
int32_t taosGetPId();
|
||||
int32_t taosGetAppName(char *name, int32_t *len);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /*_TD_OS_SEMPHONE_H_*/
|
||||
|
|
|
@ -105,6 +105,97 @@ int32_t compareStrPatternNotMatch(const void *pLeft, const void *pRight);
|
|||
int32_t compareWStrPatternMatch(const void *pLeft, const void *pRight);
|
||||
int32_t compareWStrPatternNotMatch(const void *pLeft, const void *pRight);
|
||||
|
||||
int32_t compareInt8Int16(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt8Int32(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt8Int64(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt8Float(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt8Double(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt8Uint8(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt8Uint16(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt8Uint32(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt8Uint64(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt16Int8(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt16Int32(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt16Int64(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt16Float(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt16Double(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt16Uint8(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt16Uint16(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt16Uint32(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt16Uint64(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt32Int8(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt32Int16(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt32Int64(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt32Float(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt32Double(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt32Uint8(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt32Uint16(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt32Uint32(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt32Uint64(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt64Int8(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt64Int16(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt64Int32(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt64Float(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt64Double(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt64Uint8(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt64Uint16(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt64Uint32(const void *pLeft, const void *pRight);
|
||||
int32_t compareInt64Uint64(const void *pLeft, const void *pRight);
|
||||
int32_t compareFloatInt8(const void *pLeft, const void *pRight);
|
||||
int32_t compareFloatInt16(const void *pLeft, const void *pRight);
|
||||
int32_t compareFloatInt32(const void *pLeft, const void *pRight);
|
||||
int32_t compareFloatInt64(const void *pLeft, const void *pRight);
|
||||
int32_t compareFloatDouble(const void *pLeft, const void *pRight);
|
||||
int32_t compareFloatUint8(const void *pLeft, const void *pRight);
|
||||
int32_t compareFloatUint16(const void *pLeft, const void *pRight);
|
||||
int32_t compareFloatUint32(const void *pLeft, const void *pRight);
|
||||
int32_t compareFloatUint64(const void *pLeft, const void *pRight);
|
||||
int32_t compareDoubleInt8(const void *pLeft, const void *pRight);
|
||||
int32_t compareDoubleInt16(const void *pLeft, const void *pRight);
|
||||
int32_t compareDoubleInt32(const void *pLeft, const void *pRight);
|
||||
int32_t compareDoubleInt64(const void *pLeft, const void *pRight);
|
||||
int32_t compareDoubleFloat(const void *pLeft, const void *pRight);
|
||||
int32_t compareDoubleUint8(const void *pLeft, const void *pRight);
|
||||
int32_t compareDoubleUint16(const void *pLeft, const void *pRight);
|
||||
int32_t compareDoubleUint32(const void *pLeft, const void *pRight);
|
||||
int32_t compareDoubleUint64(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint8Int8(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint8Int16(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint8Int32(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint8Int64(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint8Float(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint8Double(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint8Uint16(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint8Uint32(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint8Uint64(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint16Int8(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint16Int16(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint16Int32(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint16Int64(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint16Float(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint16Double(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint16Uint8(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint16Uint32(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint16Uint64(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint32Int8(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint32Int16(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint32Int32(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint32Int64(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint32Float(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint32Double(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint32Uint8(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint32Uint16(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint32Uint64(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint64Int8(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint64Int16(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint64Int32(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint64Int64(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint64Float(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint64Double(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint64Uint8(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint64Uint16(const void *pLeft, const void *pRight);
|
||||
int32_t compareUint64Uint32(const void *pLeft, const void *pRight);
|
||||
|
||||
__compar_fn_t getComparFunc(int32_t type, int32_t optr);
|
||||
__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order);
|
||||
int32_t doCompare(const char *a, const char *b, int32_t type, size_t size);
|
||||
|
|
|
@ -264,12 +264,14 @@ static FORCE_INLINE int32_t tEncodeDouble(SEncoder* pCoder, double val) {
|
|||
|
||||
static FORCE_INLINE int32_t tEncodeBinary(SEncoder* pCoder, const uint8_t* val, uint32_t len) {
|
||||
if (tEncodeU32v(pCoder, len) < 0) return -1;
|
||||
if (pCoder->data) {
|
||||
if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1;
|
||||
memcpy(TD_CODER_CURRENT(pCoder), val, len);
|
||||
}
|
||||
if (len) {
|
||||
if (pCoder->data) {
|
||||
if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1;
|
||||
memcpy(TD_CODER_CURRENT(pCoder), val, len);
|
||||
}
|
||||
|
||||
TD_CODER_MOVE_POS(pCoder, len);
|
||||
TD_CODER_MOVE_POS(pCoder, len);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -414,14 +416,18 @@ static int32_t tDecodeCStrTo(SDecoder* pCoder, char* val) {
|
|||
static FORCE_INLINE int32_t tDecodeBinaryAlloc(SDecoder* pCoder, void** val, uint64_t* len) {
|
||||
uint64_t length = 0;
|
||||
if (tDecodeU64v(pCoder, &length) < 0) return -1;
|
||||
if (len) *len = length;
|
||||
if (length) {
|
||||
if (len) *len = length;
|
||||
|
||||
if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
|
||||
*val = taosMemoryMalloc(length);
|
||||
if (*val == NULL) return -1;
|
||||
memcpy(*val, TD_CODER_CURRENT(pCoder), length);
|
||||
if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
|
||||
*val = taosMemoryMalloc(length);
|
||||
if (*val == NULL) return -1;
|
||||
memcpy(*val, TD_CODER_CURRENT(pCoder), length);
|
||||
|
||||
TD_CODER_MOVE_POS(pCoder, length);
|
||||
TD_CODER_MOVE_POS(pCoder, length);
|
||||
} else {
|
||||
*val = NULL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -58,11 +58,10 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
|
|||
/**
|
||||
*
|
||||
* @param pBuf
|
||||
* @param groupId
|
||||
* @param pageId
|
||||
* @return
|
||||
*/
|
||||
void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId);
|
||||
void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId);
|
||||
|
||||
/**
|
||||
*
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
def sync_source(branch_name) {
|
||||
sh '''
|
||||
hostname
|
||||
env
|
||||
ip addr|grep 192|awk '{print $2}'|sed "s/\\/.*//"
|
||||
echo ''' + branch_name + '''
|
||||
'''
|
||||
sh '''
|
||||
|
@ -15,6 +15,7 @@ def sync_source(branch_name) {
|
|||
cd ${TDENGINE_ROOT_DIR}
|
||||
git reset --hard
|
||||
git fetch || git fetch
|
||||
rm -rf examples/rust/
|
||||
git checkout ''' + branch_name + ''' -f
|
||||
git branch
|
||||
git pull || git pull
|
||||
|
@ -53,6 +54,16 @@ pipeline {
|
|||
defaultValue:'3.0.0.1',
|
||||
description: 'This number of baseVersion is generally not modified. Now it is 3.0.0.1'
|
||||
)
|
||||
string (
|
||||
name:'toolsVersion',
|
||||
defaultValue:'2.1.2',
|
||||
description: 'This number of toolsVersion is generally not modified. Now it is 2.1.2'
|
||||
)
|
||||
string (
|
||||
name:'toolsBaseVersion',
|
||||
defaultValue:'2.1.2',
|
||||
description: 'This number of toolsBaseVersion is generally not modified. Now it is 2.1.2'
|
||||
)
|
||||
}
|
||||
environment{
|
||||
WORK_DIR = '/var/lib/jenkins/workspace'
|
||||
|
@ -61,7 +72,7 @@ pipeline {
|
|||
BRANCH_NAME = '3.0'
|
||||
|
||||
TD_SERVER_TAR = "TDengine-server-${version}-Linux-x64.tar.gz"
|
||||
BASE_TD_SERVER_TAR = "TDengine-server-${baseVersion}-arm64-x64.tar.gz"
|
||||
BASE_TD_SERVER_TAR = "TDengine-server-${baseVersion}-Linux-x64.tar.gz"
|
||||
|
||||
TD_SERVER_ARM_TAR = "TDengine-server-${version}-Linux-arm64.tar.gz"
|
||||
BASE_TD_SERVER_ARM_TAR = "TDengine-server-${baseVersion}-Linux-arm64.tar.gz"
|
||||
|
@ -70,7 +81,7 @@ pipeline {
|
|||
BASE_TD_SERVER_LITE_TAR = "TDengine-server-${baseVersion}-Linux-x64-Lite.tar.gz"
|
||||
|
||||
TD_CLIENT_TAR = "TDengine-client-${version}-Linux-x64.tar.gz"
|
||||
BASE_TD_CLIENT_TAR = "TDengine-client-${baseVersion}-arm64-x64.tar.gz"
|
||||
BASE_TD_CLIENT_TAR = "TDengine-client-${baseVersion}-Linux-x64.tar.gz"
|
||||
|
||||
TD_CLIENT_ARM_TAR = "TDengine-client-${version}-Linux-arm64.tar.gz"
|
||||
BASE_TD_CLIENT_ARM_TAR = "TDengine-client-${baseVersion}-Linux-arm64.tar.gz"
|
||||
|
@ -86,31 +97,22 @@ pipeline {
|
|||
|
||||
TD_CLIENT_EXE = "TDengine-client-${version}-Windows-x64.exe"
|
||||
|
||||
TD_TOOLS_TAR = "taosTools-${toolsVersion}-Linux-x64.tar.gz"
|
||||
|
||||
|
||||
}
|
||||
stages {
|
||||
stage ('RUN') {
|
||||
stage('get check package scripts'){
|
||||
agent{label 'ubuntu18'}
|
||||
steps {
|
||||
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
|
||||
script{
|
||||
sync_source("${BRANCH_NAME}")
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
parallel {
|
||||
stage('ubuntu16') {
|
||||
agent{label " ubuntu16 "}
|
||||
steps {
|
||||
timeout(time: 3, unit: 'MINUTES'){
|
||||
timeout(time: 30, unit: 'MINUTES'){
|
||||
sync_source("${BRANCH_NAME}")
|
||||
sh '''
|
||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
|
||||
python3 checkPackageRuning.py
|
||||
rmtaos
|
||||
'''
|
||||
sh '''
|
||||
cd ${TDENGINE_ROOT_DIR}/packaging
|
||||
|
@@ -128,12 +130,12 @@ pipeline {
stage('ubuntu18') {
agent{label " ubuntu18 "}
steps {
timeout(time: 3, unit: 'MINUTES'){
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
python3 checkPackageRuning.py
rmtaos
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging

@@ -151,12 +153,12 @@ pipeline {
stage('centos7') {
agent{label " centos7_9 "}
steps {
timeout(time: 240, unit: 'MINUTES'){
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
python3 checkPackageRuning.py
rmtaos
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging

@@ -174,12 +176,12 @@ pipeline {
stage('centos8') {
agent{label " centos8_3 "}
steps {
timeout(time: 240, unit: 'MINUTES'){
timeout(time: 30, unit: 'MINUTES'){
sync_source("${BRANCH_NAME}")
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
python3 checkPackageRuning.py
rmtaos
'''
sh '''
cd ${TDENGINE_ROOT_DIR}/packaging
@@ -22,10 +22,12 @@ import time
# install taospy

out = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
print(out)
print("taospy version %s "%out)
if (out == "" ):
os.system("pip install git+https://github.com/taosdata/taos-connector-python.git")
print("install taos python connector")
else:
os.system("pip3 install --upgrade taospy ")
@@ -158,7 +158,7 @@ When you build your application with docker, you should add the TDengine client
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
   && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
   && cd TDengine-client-${TDENGINE_VERSION} \
   && ./install_client.sh \

@@ -265,7 +265,7 @@ Full version of dockerfile could be:
```dockerfile
FROM golang:1.17.6-buster as builder
ENV TDENGINE_VERSION=2.4.0.0
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
   && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
   && cd TDengine-client-${TDENGINE_VERSION} \
   && ./install_client.sh \

@@ -279,7 +279,7 @@ RUN go env && go mod tidy && go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
   && tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
   && cd TDengine-client-${TDENGINE_VERSION} \
   && ./install_client.sh \
@@ -1,8 +1,5 @@
#!/bin/sh

# function installPkgAndCheckFile{

echo "Download package"

packgeName=$1
version=$2

@@ -10,6 +7,7 @@ originPackageName=$3
originversion=$4
testFile=$5
subFile="taos.tar.gz"
password=$6

if [ ${testFile} = "server" ];then
tdPath="TDengine-server-${version}"

@@ -25,22 +23,42 @@ elif [ ${testFile} = "tools" ];then
installCmd="install-taostools.sh"
fi
function cmdInstall {
comd=$1
if command -v ${comd} ;then
echo "${comd} is already installed"
else
if command -v apt ;then
apt-get install ${comd} -y
elif command -v yum ;then
yum -y install ${comd}
else
echo "you should install ${comd} manually"
fi
fi
}

echo "Uninstall all components of TDengine"

if command -v rmtaos ;then
echo "uninstall all components of TDengine:rmtaos"
echo " "
rmtaos
else
echo "os doesn't include TDengine "
fi

if command -v rmtaostools ;then
echo "uninstall all components of TDengine:rmtaostools"
echo " "
rmtaostools
else
echo "os doesn't include rmtaostools "
fi


cmdInstall tree
cmdInstall wget
cmdInstall sshpass

echo "new workroom path"
installPath="/usr/local/src/packageTest"
oriInstallPath="/usr/local/src/packageTest/3.1"
@@ -58,22 +76,49 @@ else
echo "${oriInstallPath} already exists"
fi

echo "decompress installPackage"


echo "download installPackage"
# cd ${installPath}
# wget https://www.taosdata.com/assets-download/3.0/${packgeName}
# cd ${oriInstallPath}
# wget https://www.taosdata.com/assets-download/3.0/${originPackageName}

cd ${installPath}
wget https://www.taosdata.com/assets-download/3.0/${packgeName}
cd ${oriInstallPath}
wget https://www.taosdata.com/assets-download/3.0/${originPackageName}

if [ ! -f ${packgeName} ];then
sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/${packgeName} .
fi
if [ ! -f debAuto.sh ];then
echo '#!/usr/bin/expect ' > debAuto.sh
echo 'set timeout 3 ' >> debAuto.sh
echo 'set packgeName [lindex $argv 0]' >> debAuto.sh
echo 'spawn dpkg -i ${packgeName}' >> debAuto.sh
echo 'expect "*one:"' >> debAuto.sh
echo 'send "\r"' >> debAuto.sh
echo 'expect "*skip:"' >> debAuto.sh
echo 'send "\r" ' >> debAuto.sh
fi

if [[ ${packgeName} =~ "deb" ]];then
echo "dpkg ${packgeName}" && dpkg -i ${packgeName}
cd ${installPath}
dpkg -r taostools
dpkg -r tdengine
if [[ ${packgeName} =~ "TDengine" ]];then
echo "./debAuto.sh ${packgeName}" && chmod 755 debAuto.sh && ./debAuto.sh ${packgeName}
else
echo "dpkg -i ${packgeName}" && dpkg -i ${packgeName}

elif [[ ${packgeName} =~ "rpm" ]];then
echo "rpm ${packgeName}" && rpm -ivh ${packgeName}
cd ${installPath}
echo "rpm ${packgeName}" && rpm -ivh ${packgeName} --quiet
elif [[ ${packgeName} =~ "tar" ]];then
echo "tar ${packgeName}" && tar -xvf ${packgeName}
cd ${oriInstallPath}
if [ ! -f ${originPackageName} ];then
sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${originversion}/community/${originPackageName} .
fi
echo "tar -xvf ${originPackageName}" && tar -xvf ${originPackageName}

cd ${installPath}
echo "tar -xvf ${packgeName}" && tar -xvf ${packgeName}

@@ -87,10 +132,10 @@ elif [[ ${packgeName} =~ "tar" ]];then

cd ${installPath}

tree ${oriInstallPath}/${originTdpPath} > ${originPackageName}_checkfile
tree ${installPath}/${tdPath} > ${packgeName}_checkfile
tree ${oriInstallPath}/${originTdpPath} > ${oriInstallPath}/${originPackageName}_checkfile
tree ${installPath}/${tdPath} > ${installPath}/${packgeName}_checkfile

diff ${packgeName}_checkfile ${originPackageName}_checkfile > ${installPath}/diffFile.log
diff ${installPath}/${packgeName}_checkfile ${oriInstallPath}/${originPackageName}_checkfile > ${installPath}/diffFile.log
diffNumbers=`cat ${installPath}/diffFile.log |wc -l `
if [ ${diffNumbers} != 0 ];then
echo "The number and names of files have changed from the previous installation package"

@@ -104,9 +149,21 @@ elif [[ ${packgeName} =~ "tar" ]];then
else
bash ${installCmd}
fi
if [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]] ;then
cd ${installPath}
sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.tar.gz .
# wget https://www.taosdata.com/assets-download/3.0/taosTools-2.1.2-Linux-x64.tar.gz
tar xvf taosTools-2.1.2-Linux-x64.tar.gz
cd taosTools-2.1.2 && bash install-taostools.sh
elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then
cd ${installPath}
sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.deb .
dpkg -i taosTools-2.1.2-Linux-x64.deb
elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then
cd ${installPath}
sshpass -p ${password} scp 192.168.1.131:/nas/TDengine3/v${version}/community/taosTools-2.1.2-Linux-x64.rpm .
rpm -ivh taosTools-2.1.2-Linux-x64.rpm --quiet
fi

fi
# }

# installPkgAndCheckFile
fi
@@ -27,11 +27,18 @@ else()
INCLUDE_DIRECTORIES(jni/linux)
endif()

set_target_properties(
taos
PROPERTIES
CLEAN_DIRECT_OUTPUT
1
)

set_target_properties(
taos
PROPERTIES
VERSION ${TD_VER_NUMBER}
SOVERSION ${TD_VER_NUMBER}
SOVERSION 1
)

add_library(taos_static STATIC ${CLIENT_SRC})
@@ -52,15 +52,17 @@ enum {
RES_TYPE__QUERY = 1,
RES_TYPE__TMQ,
RES_TYPE__TMQ_META,
RES_TYPE__TAOSX,
};

#define SHOW_VARIABLES_RESULT_COLS 2
#define SHOW_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)

#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ)
#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ || *(int8_t*)res == RES_TYPE__TAOSX)
#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
#define TD_RES_TMQ_TAOSX(res) (*(int8_t*)res == RES_TYPE__TAOSX)
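Throughout the client, a `TAOS_RES*` is an opaque pointer whose first byte is the `resType` tag, which is what lets these macros classify a result without knowing its concrete struct. A minimal self-contained sketch of that tagged-prefix dispatch (the struct names below are illustrative, not the real client types):

```c
#include <stdint.h>
#include <stdio.h>

enum { RES_TYPE__QUERY = 1, RES_TYPE__TMQ, RES_TYPE__TMQ_META, RES_TYPE__TAOSX };

/* Every concrete response struct lays out resType as its first member,
 * so any of them can be classified by reading the first byte. */
typedef struct { int8_t resType; /* query-specific fields ... */ } SQueryObjSketch;
typedef struct { int8_t resType; /* tmq-specific fields ...   */ } STmqObjSketch;

static int isTmqData(void *res) {
  int8_t t = *(int8_t *)res;
  /* TAOSX responses carry data blocks too, hence the widened check. */
  return t == RES_TYPE__TMQ || t == RES_TYPE__TAOSX;
}

int main(void) {
  STmqObjSketch obj = {.resType = RES_TYPE__TAOSX};
  printf("is tmq data: %d\n", isTmqData(&obj));  // prints 1
  return 0;
}
```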
typedef struct SAppInstInfo SAppInstInfo;

@@ -200,8 +202,8 @@ typedef struct {
int32_t vgId;
SSchemaWrapper schema;
int32_t resIter;
SMqDataRsp rsp;
SReqResultInfo resInfo;
SMqDataRsp rsp;
} SMqRspObj;

typedef struct {

@@ -212,6 +214,17 @@ typedef struct {
SMqMetaRsp metaRsp;
} SMqMetaRspObj;

typedef struct {
int8_t resType;
char topic[TSDB_TOPIC_FNAME_LEN];
char db[TSDB_DB_FNAME_LEN];
int32_t vgId;
SSchemaWrapper schema;
int32_t resIter;
SReqResultInfo resInfo;
STaosxRsp rsp;
} SMqTaosxRspObj;

typedef struct SRequestObj {
int8_t resType; // query or tmq
uint64_t requestId;
@@ -42,6 +42,7 @@ void commit_cb(tmq_t *tmq, int32_t code, void *param) {

JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqConfNewImp(JNIEnv *env, jobject jobj) {
tmq_conf_t *conf = tmq_conf_new();
jniGetGlobalMethod(env);
return (jlong)conf;
}


@@ -145,7 +145,7 @@ static int32_t hbProcessStbInfoRsp(void *value, int32_t valueLen, struct SCatalo
}

static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
SClientHbReq *pReq = taosHashGet(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey));
SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey));
if (NULL == pReq) {
tscWarn("pReq to get activeInfo, may be dropped, refId:%" PRIx64 ", type:%d", pRsp->connKey.tscRid,
pRsp->connKey.connType);

@@ -260,6 +260,8 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
}
}

taosHashRelease(pAppHbMgr->activeInfo, pReq);

return TSDB_CODE_SUCCESS;
}
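`taosHashAcquire` differs from `taosHashGet` in that it pins the returned entry until a matching `taosHashRelease`, which is what makes the lookup safe against a concurrent remove. A reduced sketch of the acquire/release discipline, using a hypothetical ref-counted `Entry` in place of the real hash table:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical ref-counted entry: a stand-in for one hash-table slot. */
typedef struct {
  int refs;
  int value;
} Entry;

static Entry *entryAcquire(Entry *e) { /* like taosHashAcquire */
  if (e) e->refs++;  // pin: a concurrent remove drops its ref but cannot free
  return e;
}

static void entryRelease(Entry *e) {   /* like taosHashRelease */
  if (e && --e->refs == 0) free(e);    // only the last reference really frees
}

int main(void) {
  Entry *slot = malloc(sizeof(Entry));
  slot->refs = 1;  // the table's own reference
  slot->value = 42;

  Entry *p = entryAcquire(slot);  // reader pins the entry
  printf("use %d\n", p->value);   // safe even if removed concurrently
  entryRelease(p);                // reader unpins

  entryRelease(slot);             // table drops its ref; entry is freed
  return 0;
}
```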

@@ -914,10 +916,11 @@ int hbRegisterConn(SAppHbMgr *pAppHbMgr, int64_t tscRefId, int64_t clusterId, in
}

void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey) {
SClientHbReq *pReq = taosHashGet(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
if (pReq) {
tFreeClientHbReq(pReq);
taosHashRemove(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
taosHashRelease(pAppHbMgr->activeInfo, pReq);
}

if (NULL == pReq) {
@@ -184,6 +184,19 @@ void taos_free_result(TAOS_RES *res) {
SRequestObj *pRequest = (SRequestObj *)res;
tscDebug("0x%" PRIx64 " taos_free_result start to free query", pRequest->requestId);
destroyRequest(pRequest);
} else if (TD_RES_TMQ_TAOSX(res)) {
SMqTaosxRspObj *pRsp = (SMqTaosxRspObj *)res;
if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
if (pRsp->rsp.blockDataLen) taosArrayDestroy(pRsp->rsp.blockDataLen);
if (pRsp->rsp.withTbName) taosArrayDestroyP(pRsp->rsp.blockTbName, taosMemoryFree);
if (pRsp->rsp.withSchema) taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
// taosx
taosArrayDestroy(pRsp->rsp.createTableLen);
taosArrayDestroyP(pRsp->rsp.createTableReq, taosMemoryFree);

pRsp->resInfo.pRspMsg = NULL;
doFreeReqResultInfo(&pRsp->resInfo);
taosMemoryFree(pRsp);
} else if (TD_RES_TMQ(res)) {
SMqRspObj *pRsp = (SMqRspObj *)res;
if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
@@ -164,6 +164,7 @@ typedef struct {
union {
SMqDataRsp dataRsp;
SMqMetaRsp metaRsp;
STaosxRsp taosxRsp;
};
} SMqPollRspWrapper;

@@ -1130,21 +1131,29 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tDecodeSMqDataRsp(&decoder, &pRspWrapper->dataRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead));
} else {
ASSERT(rspType == TMQ_MSG_TYPE__POLL_META_RSP);

tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
rspType);

} else if (rspType == TMQ_MSG_TYPE__POLL_META_RSP) {
SDecoder decoder;
tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
tDecodeSMqMetaRsp(&decoder, &pRspWrapper->metaRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->metaRsp, pMsg->pData, sizeof(SMqRspHead));
} else if (rspType == TMQ_MSG_TYPE__TAOSX_RSP) {
SDecoder decoder;
tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
tDecodeSTaosxRsp(&decoder, &pRspWrapper->taosxRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->taosxRsp, pMsg->pData, sizeof(SMqRspHead));
} else {
ASSERT(0);
}

taosMemoryFree(pMsg->pData);

tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
rspType);

taosWriteQitem(tmq->mqueue, pRspWrapper);
tsem_post(&tmq->rspSem);
@@ -1443,6 +1452,24 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper) {
return pRspObj;
}

SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) {
SMqTaosxRspObj* pRspObj = taosMemoryCalloc(1, sizeof(SMqTaosxRspObj));
pRspObj->resType = RES_TYPE__TAOSX;
tstrncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN);
tstrncpy(pRspObj->db, pWrapper->topicHandle->db, TSDB_DB_FNAME_LEN);
pRspObj->vgId = pWrapper->vgHandle->vgId;
pRspObj->resIter = -1;
memcpy(&pRspObj->rsp, &pWrapper->dataRsp, sizeof(SMqTaosxRspObj));

pRspObj->resInfo.totalRows = 0;
pRspObj->resInfo.precision = TSDB_TIME_PRECISION_MILLI;
if (!pWrapper->dataRsp.withSchema) {
setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols);
}

return pRspObj;
}

int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
/*tscDebug("call poll");*/
for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
@@ -1595,6 +1622,30 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
pollRspWrapper->metaRsp.head.epoch, consumerEpoch);
taosFreeQitem(pollRspWrapper);
}
} else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__TAOSX_RSP) {
SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)rspWrapper;
/*atomic_sub_fetch_32(&tmq->readyRequest, 1);*/
int32_t consumerEpoch = atomic_load_32(&tmq->epoch);
if (pollRspWrapper->taosxRsp.head.epoch == consumerEpoch) {
SMqClientVg* pVg = pollRspWrapper->vgHandle;
/*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset,
 * rspMsg->msg.rspOffset);*/
pVg->currentOffset = pollRspWrapper->taosxRsp.rspOffset;
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
if (pollRspWrapper->taosxRsp.blockNum == 0) {
taosFreeQitem(pollRspWrapper);
rspWrapper = NULL;
continue;
}
// build rsp
SMqRspObj* pRsp = tmqBuildRspFromWrapper(pollRspWrapper);
taosFreeQitem(pollRspWrapper);
return pRsp;
} else {
tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d\n",
pollRspWrapper->taosxRsp.head.epoch, consumerEpoch);
taosFreeQitem(pollRspWrapper);
}
} else {
/*printf("handle ep rsp %d\n", rspMsg->head.mqMsgType);*/
bool reset = false;
@@ -1707,9 +1758,11 @@ tmq_res_t tmq_get_res_type(TAOS_RES* res) {
} else if (TD_RES_TMQ_META(res)) {
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DELETE) {
return TMQ_RES_DATA;
return TMQ_RES_TAOSX;
}
return TMQ_RES_TABLE_META;
} else if (TD_RES_TMQ_TAOSX(res)) {
return TMQ_RES_TAOSX;
} else {
return TMQ_RES_INVALID;
}
@@ -427,6 +427,152 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
return 0;
}

static int32_t taosUpdateServerCfg(SConfig *pCfg) {
SConfigItem *pItem;
ECfgSrcType stype;
int32_t numOfCores;
int64_t totalMemoryKB;

pItem = cfgGetItem(tsCfg, "numOfCores");
if (pItem == NULL) {
return -1;
} else {
stype = pItem->stype;
numOfCores = pItem->fval;
}

pItem = cfgGetItem(tsCfg, "supportVnodes");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfSupportVnodes = numOfCores * 2;
tsNumOfSupportVnodes = TMAX(tsNumOfSupportVnodes, 2);
pItem->i32 = tsNumOfSupportVnodes;
pItem->stype = stype;
}

pItem = cfgGetItem(tsCfg, "numOfRpcThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfRpcThreads = numOfCores / 2;
tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4);
pItem->i32 = tsNumOfRpcThreads;
pItem->stype = stype;
}

pItem = cfgGetItem(tsCfg, "numOfCommitThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfCommitThreads = numOfCores / 2;
tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
pItem->i32 = tsNumOfCommitThreads;
pItem->stype = stype;
}

pItem = cfgGetItem(tsCfg, "numOfMnodeReadThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfMnodeReadThreads = numOfCores / 8;
tsNumOfMnodeReadThreads = TRANGE(tsNumOfMnodeReadThreads, 1, 4);
pItem->i32 = tsNumOfMnodeReadThreads;
pItem->stype = stype;
}

pItem = cfgGetItem(tsCfg, "numOfVnodeQueryThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfVnodeQueryThreads = numOfCores * 2;
tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4);
pItem->i32 = tsNumOfVnodeQueryThreads;
pItem->stype = stype;
}

pItem = cfgGetItem(tsCfg, "numOfVnodeStreamThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfVnodeStreamThreads = numOfCores / 4;
tsNumOfVnodeStreamThreads = TMAX(tsNumOfVnodeStreamThreads, 4);
pItem->i32 = tsNumOfVnodeStreamThreads;
pItem->stype = stype;
}

pItem = cfgGetItem(tsCfg, "numOfVnodeFetchThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfVnodeFetchThreads = numOfCores / 4;
tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4);
pItem->i32 = tsNumOfVnodeFetchThreads;
pItem->stype = stype;
}

pItem = cfgGetItem(tsCfg, "numOfVnodeWriteThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfVnodeWriteThreads = numOfCores;
tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
pItem->i32 = tsNumOfVnodeWriteThreads;
pItem->stype = stype;
}

pItem = cfgGetItem(tsCfg, "numOfVnodeSyncThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfVnodeSyncThreads = numOfCores * 2;
tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16);
pItem->i32 = tsNumOfVnodeSyncThreads;
pItem->stype = stype;
}

pItem = cfgGetItem(tsCfg, "numOfVnodeRsmaThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfVnodeRsmaThreads = numOfCores;
tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4);
pItem->i32 = tsNumOfVnodeRsmaThreads;
pItem->stype = stype;
}

pItem = cfgGetItem(tsCfg, "numOfQnodeQueryThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfQnodeQueryThreads = numOfCores * 2;
tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
pItem->i32 = tsNumOfQnodeQueryThreads;
pItem->stype = stype;
}

pItem = cfgGetItem(tsCfg, "numOfQnodeFetchThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfQnodeFetchThreads = numOfCores / 2;
tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4);
pItem->i32 = tsNumOfQnodeFetchThreads;
pItem->stype = stype;
}

pItem = cfgGetItem(tsCfg, "numOfSnodeSharedThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfSnodeSharedThreads = numOfCores / 4;
tsNumOfSnodeSharedThreads = TRANGE(tsNumOfSnodeSharedThreads, 2, 4);
pItem->i32 = tsNumOfSnodeSharedThreads;
pItem->stype = stype;
}

pItem = cfgGetItem(tsCfg, "numOfSnodeUniqueThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfSnodeUniqueThreads = numOfCores / 4;
tsNumOfSnodeUniqueThreads = TRANGE(tsNumOfSnodeUniqueThreads, 2, 4);
pItem->i32 = tsNumOfSnodeUniqueThreads;
pItem->stype = stype;
}

pItem = cfgGetItem(tsCfg, "totalMemoryKB");
if (pItem == NULL) {
return -1;
} else {
stype = pItem->stype;
totalMemoryKB = pItem->i64;
}

pItem = cfgGetItem(tsCfg, "rpcQueueMemoryAllowed");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsRpcQueueMemoryAllowed = totalMemoryKB * 1024 * 0.1;
tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL);
pItem->i64 = tsRpcQueueMemoryAllowed;
pItem->stype = stype;
}

return 0;
}
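Every block in `taosUpdateServerCfg` repeats one derive-and-clamp idiom: if the item is still at its compiled-in default (`CFG_STYPE_DEFAULT`), derive a value from the core count, clamp it into a safe band, and re-tag the item with the source recorded for `numOfCores`. A condensed sketch of that idiom, with `TMAX`/`TRANGE` re-declared locally so the example stands alone:

```c
#include <stdint.h>
#include <stdio.h>

#define TMAX(a, b) (((a) > (b)) ? (a) : (b))
#define TRANGE(x, lo, hi) (TMAX((lo), (((x) < (hi)) ? (x) : (hi))))

typedef enum { CFG_STYPE_DEFAULT, CFG_STYPE_CFG_FILE, CFG_STYPE_ENV_VAR } ECfgSrcType;
typedef struct { ECfgSrcType stype; int32_t i32; } CfgItemSketch;  // illustrative

/* Derive a thread count from the core count only if the user never set it. */
static void updateIfDefault(CfgItemSketch *pItem, int32_t derived, int32_t lo,
                            int32_t hi, ECfgSrcType coresStype) {
  if (pItem->stype != CFG_STYPE_DEFAULT) return;  // explicit setting wins
  pItem->i32 = TRANGE(derived, lo, hi);           // clamp into a safe band
  pItem->stype = coresStype;                      // inherit numOfCores' source
}

int main(void) {
  int32_t numOfCores = 8;
  CfgItemSketch rpcThreads = {.stype = CFG_STYPE_DEFAULT, .i32 = 0};
  updateIfDefault(&rpcThreads, numOfCores / 2, 1, 4, CFG_STYPE_CFG_FILE);
  printf("numOfRpcThreads -> %d\n", rpcThreads.i32);  // 4 (8/2 clamped to [1,4])
  return 0;
}
```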

static void taosSetClientLogCfg(SConfig *pCfg) {
SConfigItem *pItem = cfgGetItem(pCfg, "logDir");
tstrncpy(tsLogDir, cfgGetItem(pCfg, "logDir")->str, PATH_MAX);

@@ -981,7 +1127,7 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi
taosSetServerLogCfg(pCfg);
}

taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32);
taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32, false);

if (taosMulMkDir(tsLogDir) != 0) {
uError("failed to create dir:%s since %s", tsLogDir, terrstr());

@@ -1048,6 +1194,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile
if (taosSetClientCfg(tsCfg)) return -1;
} else {
if (taosSetClientCfg(tsCfg)) return -1;
if (taosUpdateServerCfg(tsCfg)) return -1;
if (taosSetServerCfg(tsCfg)) return -1;
if (taosSetTfsCfg(tsCfg) != 0) return -1;
}

@@ -1072,7 +1219,7 @@ void taosCleanupCfg() {
void taosCfgDynamicOptions(const char *option, const char *value) {
if (strncasecmp(option, "debugFlag", 9) == 0) {
int32_t flag = atoi(value);
taosSetAllDebugFlag(flag);
taosSetAllDebugFlag(flag, true);
return;
}

@@ -1097,11 +1244,13 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
"dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag",
"tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag",
"tmrDebugFlag", "uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag",
"jniDebugFlag",
};
int32_t *optionVars[] = {
&dDebugFlag, &vDebugFlag, &mDebugFlag, &wDebugFlag, &sDebugFlag, &tsdbDebugFlag,
&tqDebugFlag, &fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tdbDebugFlag,
&tmrDebugFlag, &uDebugFlag, &smaDebugFlag, &rpcDebugFlag, &qDebugFlag, &metaDebugFlag,
&jniDebugFlag,
};

int32_t optionSize = tListLen(options);

@@ -1113,41 +1262,42 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
int32_t flag = atoi(value);
uInfo("%s set from %d to %d", optName, *optionVars[d], flag);
*optionVars[d] = flag;
taosSetDebugFlag(optionVars[d], optName, flag);
taosSetDebugFlag(optionVars[d], optName, flag, true);
return;
}

uError("failed to cfg dynamic option:%s value:%s", option, value);
}

void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) {
void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite) {
SConfigItem *pItem = cfgGetItem(tsCfg, flagName);
if (pItem != NULL) {
if (pItem != NULL && (rewrite || pItem->i32 == 0)) {
pItem->i32 = flagVal;
}
*pFlagPtr = flagVal;
}

void taosSetAllDebugFlag(int32_t flag) {
void taosSetAllDebugFlag(int32_t flag, bool rewrite) {
if (flag <= 0) return;

taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag);
taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag);
taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag);
taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag);
taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag);
taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag);
taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag);
taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag);
taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag);
taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag);
taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag);
taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag);
taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag);
taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag);
taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag);
taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag);
taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag);
taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag);
taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag, rewrite);
taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag, rewrite);
taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag, rewrite);
taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag, rewrite);
taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag, rewrite);
taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag, rewrite);
taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, rewrite);
taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, rewrite);
taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, rewrite);
taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, rewrite);
taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, rewrite);
taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, rewrite);
taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag, rewrite);
taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag, rewrite);
taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag, rewrite);
taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag, rewrite);
taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag, rewrite);
taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite);
taosSetDebugFlag(&tmrDebugFlag, "tmrDebugFlag", flag, rewrite);
uInfo("all debug flag are set to %d", flag);
}
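The added `rewrite` parameter controls whether an already-configured flag may be overwritten: `taosCreateLog` passes `false` so file-sourced values survive startup, while `taosCfgDynamicOptions` passes `true` to force the change. A reduced sketch of those two call paths, assuming (as the condition `pItem->i32 == 0` suggests) that a zero value means the flag was never configured:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int32_t uDebugFlagSketch = 0;  // stand-in for one live debug flag
static int32_t cfgItemValue = 135;    // stand-in for the persisted cfg item

static void setDebugFlagSketch(int32_t *pFlagPtr, int32_t flagVal, bool rewrite) {
  // Persist into the config item only when forcing (dynamic change) or when
  // it was never configured; the live flag itself is always updated.
  if (rewrite || cfgItemValue == 0) cfgItemValue = flagVal;
  *pFlagPtr = flagVal;
}

int main(void) {
  setDebugFlagSketch(&uDebugFlagSketch, 131, false);  // startup: cfg 135 kept
  printf("cfg=%d live=%d\n", cfgItemValue, uDebugFlagSketch);  // 135 131
  setDebugFlagSketch(&uDebugFlagSketch, 143, true);   // dynamic: overwrite
  printf("cfg=%d live=%d\n", cfgItemValue, uDebugFlagSketch);  // 143 143
  return 0;
}
```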

@@ -3330,7 +3330,7 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
return 0;
}

void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp*)pRsp)->pSchemas); }
void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas); }

void tFreeSTableIndexRsp(void *info) {
if (NULL == info) {

@@ -5119,17 +5119,17 @@ int tDecodeSVCreateTbRsp(SDecoder *pCoder, SVCreateTbRsp *pRsp) {
} else {
pRsp->pMeta = NULL;
}

tEndDecode(pCoder);
return 0;
}

void tFreeSVCreateTbRsp(void* param) {
void tFreeSVCreateTbRsp(void *param) {
if (NULL == param) {
return;
}

SVCreateTbRsp* pRsp = (SVCreateTbRsp*)param;

SVCreateTbRsp *pRsp = (SVCreateTbRsp *)param;
if (pRsp->pMeta) {
taosMemoryFree(pRsp->pMeta->pSchemas);
taosMemoryFree(pRsp->pMeta);

@@ -5345,7 +5345,7 @@ static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
if (tDecodeI32v(pDecoder, &pBlock->numOfRows) < 0) return -1;
if (tDecodeI32v(pDecoder, &pBlock->affectedRows) < 0) return -1;
if (tDecodeI64v(pDecoder, &pBlock->sver) < 0) return -1;

int32_t meta = 0;
if (tDecodeI32(pDecoder, &meta) < 0) return -1;
if (meta) {

@@ -5393,12 +5393,12 @@ int32_t tDecodeSSubmitRsp(SDecoder *pDecoder, SSubmitRsp *pRsp) {
return 0;
}

void tFreeSSubmitBlkRsp(void* param) {
void tFreeSSubmitBlkRsp(void *param) {
if (NULL == param) {
return;
}

SSubmitBlkRsp* pRsp = (SSubmitBlkRsp*)param;

SSubmitBlkRsp *pRsp = (SSubmitBlkRsp *)param;

taosMemoryFree(pRsp->tblFName);
if (pRsp->pMeta) {

@@ -5407,7 +5407,6 @@ void tFreeSSubmitBlkRsp(void* param) {
}
}

void tFreeSSubmitRsp(SSubmitRsp *pRsp) {
if (NULL == pRsp) return;

@@ -5619,7 +5618,6 @@ void tFreeSMAlterStbRsp(SMAlterStbRsp *pRsp) {
}
}

int32_t tEncodeSMCreateStbRsp(SEncoder *pEncoder, const SMCreateStbRsp *pRsp) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->pMeta->pSchemas ? 1 : 0) < 0) return -1;

@@ -5671,8 +5669,6 @@ void tFreeSMCreateStbRsp(SMCreateStbRsp *pRsp) {
}
}


int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal) {
if (tEncodeI8(pEncoder, pOffsetVal->type) < 0) return -1;
if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {

@@ -5690,7 +5686,7 @@ int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal)

int32_t tDecodeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffsetVal) {
if (tDecodeI8(pDecoder, &pOffsetVal->type) < 0) return -1;
if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
if (tDecodeI64(pDecoder, &pOffsetVal->uid) < 0) return -1;
if (tDecodeI64(pDecoder, &pOffsetVal->ts) < 0) return -1;
} else if (pOffsetVal->type == TMQ_OFFSET__LOG) {

@@ -5712,7 +5708,7 @@ int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) {
snprintf(buf, maxLen, "offset(reset to latest)");
} else if (pVal->type == TMQ_OFFSET__LOG) {
snprintf(buf, maxLen, "offset(log) ver:%" PRId64, pVal->version);
} else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
} else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
snprintf(buf, maxLen, "offset(ss data) uid:%" PRId64 ", ts:%" PRId64, pVal->uid, pVal->ts);
} else {
ASSERT(0);

@@ -5813,17 +5809,17 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) {
return 0;
}

int32_t tEncodeSMqMetaRsp(SEncoder* pEncoder, const SMqMetaRsp* pRsp) {
int32_t tEncodeSMqMetaRsp(SEncoder *pEncoder, const SMqMetaRsp *pRsp) {
if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
if(tEncodeI16(pEncoder, pRsp->resMsgType)) return -1;
if(tEncodeBinary(pEncoder, pRsp->metaRsp, pRsp->metaRspLen)) return -1;
if (tEncodeI16(pEncoder, pRsp->resMsgType)) return -1;
if (tEncodeBinary(pEncoder, pRsp->metaRsp, pRsp->metaRspLen)) return -1;
return 0;
}

int32_t tDecodeSMqMetaRsp(SDecoder* pDecoder, SMqMetaRsp* pRsp) {
int32_t tDecodeSMqMetaRsp(SDecoder *pDecoder, SMqMetaRsp *pRsp) {
if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
if (tDecodeI16(pDecoder, &pRsp->resMsgType) < 0) return -1;
if (tDecodeBinaryAlloc(pDecoder, &pRsp->metaRsp, (uint64_t*)&pRsp->metaRspLen) < 0) return -1;
if (tDecodeBinaryAlloc(pDecoder, &pRsp->metaRsp, (uint64_t *)&pRsp->metaRspLen) < 0) return -1;
return 0;
}

@@ -5893,6 +5889,92 @@ int32_t tDecodeSMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) {
return 0;
}

int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) {
if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1;
if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->blockNum) < 0) return -1;
if (pRsp->blockNum != 0) {
if (tEncodeI8(pEncoder, pRsp->withTbName) < 0) return -1;
if (tEncodeI8(pEncoder, pRsp->withSchema) < 0) return -1;

for (int32_t i = 0; i < pRsp->blockNum; i++) {
int32_t bLen = *(int32_t *)taosArrayGet(pRsp->blockDataLen, i);
void *data = taosArrayGetP(pRsp->blockData, i);
if (tEncodeBinary(pEncoder, (const uint8_t *)data, bLen) < 0) return -1;
if (pRsp->withSchema) {
SSchemaWrapper *pSW = (SSchemaWrapper *)taosArrayGetP(pRsp->blockSchema, i);
if (tEncodeSSchemaWrapper(pEncoder, pSW) < 0) return -1;
}
if (pRsp->withTbName) {
char *tbName = (char *)taosArrayGetP(pRsp->blockTbName, i);
if (tEncodeCStr(pEncoder, tbName) < 0) return -1;
}
}
}
if (tEncodeI32(pEncoder, pRsp->createTableNum) < 0) return -1;
if (pRsp->createTableNum) {
for (int32_t i = 0; i < pRsp->createTableNum; i++) {
void *createTableReq = taosArrayGetP(pRsp->createTableReq, i);
int32_t createTableLen = *(int32_t *)taosArrayGet(pRsp->createTableLen, i);
if (tEncodeBinary(pEncoder, createTableReq, createTableLen) < 0) return -1;
}
}
return 0;
}

int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, STaosxRsp *pRsp) {
if (tDecodeSTqOffsetVal(pDecoder, &pRsp->reqOffset) < 0) return -1;
if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
if (tDecodeI32(pDecoder, &pRsp->blockNum) < 0) return -1;
if (pRsp->blockNum != 0) {
pRsp->blockData = taosArrayInit(pRsp->blockNum, sizeof(void *));
pRsp->blockDataLen = taosArrayInit(pRsp->blockNum, sizeof(int32_t));
if (tDecodeI8(pDecoder, &pRsp->withTbName) < 0) return -1;
if (tDecodeI8(pDecoder, &pRsp->withSchema) < 0) return -1;
if (pRsp->withTbName) {
pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void *));
}
if (pRsp->withSchema) {
pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void *));
}

for (int32_t i = 0; i < pRsp->blockNum; i++) {
void *data;
uint64_t bLen;
if (tDecodeBinaryAlloc(pDecoder, &data, &bLen) < 0) return -1;
taosArrayPush(pRsp->blockData, &data);
int32_t len = bLen;
taosArrayPush(pRsp->blockDataLen, &len);

if (pRsp->withSchema) {
SSchemaWrapper *pSW = (SSchemaWrapper *)taosMemoryCalloc(1, sizeof(SSchemaWrapper));
if (pSW == NULL) return -1;
if (tDecodeSSchemaWrapper(pDecoder, pSW) < 0) return -1;
taosArrayPush(pRsp->blockSchema, &pSW);
}

if (pRsp->withTbName) {
char *tbName;
if (tDecodeCStrAlloc(pDecoder, &tbName) < 0) return -1;
taosArrayPush(pRsp->blockTbName, &tbName);
}
}
}
if (tDecodeI32(pDecoder, &pRsp->createTableNum) < 0) return -1;
if (pRsp->createTableNum) {
pRsp->createTableLen = taosArrayInit(pRsp->createTableNum, sizeof(int32_t));
pRsp->createTableReq = taosArrayInit(pRsp->createTableNum, sizeof(void *));
for (int32_t i = 0; i < pRsp->createTableNum; i++) {
void *pCreate = NULL;
uint64_t len;
if (tDecodeBinaryAlloc(pDecoder, &pCreate, &len) < 0) return -1;
int32_t l = (int32_t)len;
taosArrayPush(pRsp->createTableLen, &l);
taosArrayPush(pRsp->createTableReq, &pCreate);
}
}
return 0;
}
int32_t tEncodeSSingleDeleteReq(SEncoder *pEncoder, const SSingleDeleteReq *pReq) {
if (tEncodeI64(pEncoder, pReq->uid) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->ts) < 0) return -1;
@@ -287,6 +287,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
if (consumerVgNum > minVgCnt) {
if (imbCnt < imbConsumerNum) {
if (consumerVgNum == minVgCnt + 1) {
imbCnt++;
continue;
} else {
// pop until equal minVg + 1
@@ -157,17 +157,17 @@ void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity);
size_t tsdbCacheGetCapacity(SVnode *pVnode);

// tq
typedef struct SMetaTableInfo{
typedef struct SMetaTableInfo {
int64_t suid;
int64_t uid;
SSchemaWrapper *schema;
char tbName[TSDB_TABLE_NAME_LEN];
}SMetaTableInfo;
} SMetaTableInfo;

typedef struct SIdInfo{
int64_t version;
int32_t index;
}SIdInfo;
typedef struct SIdInfo {
int64_t version;
int32_t index;
} SIdInfo;

typedef struct SSnapContext {
SMeta *pMeta;

@@ -180,8 +180,8 @@ typedef struct SSnapContext {
SArray *idList;
int32_t index;
bool withMeta;
bool queryMetaOrData; // true-get meta, false-get data
}SSnapContext;
bool queryMetaOrData; // true-get meta, false-get data
} SSnapContext;

typedef struct STqReader {
int64_t ver;

@@ -232,11 +232,12 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWr
int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *pSnapshot);
int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData);

int32_t buildSnapContext(SMeta* pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta, SSnapContext** ctxRet);
int32_t getMetafromSnapShot(SSnapContext* ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid);
SMetaTableInfo getUidfromSnapShot(SSnapContext* ctx);
int32_t setForSnapShot(SSnapContext* ctx, int64_t uid);
int32_t destroySnapContext(SSnapContext* ctx);
int32_t buildSnapContext(SMeta *pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta,
SSnapContext **ctxRet);
int32_t getMetafromSnapShot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid);
SMetaTableInfo getUidfromSnapShot(SSnapContext *ctx);
int32_t setForSnapShot(SSnapContext *ctx, int64_t uid);
int32_t destroySnapContext(SSnapContext *ctx);

// structs
struct STsdbCfg {

@@ -259,6 +260,7 @@ typedef struct {
int64_t numOfNTables;
int64_t numOfNTimeSeries;
int64_t numOfTimeSeries;
int64_t itvTimeSeries;
int64_t pointsWritten;
int64_t totalStorage;
int64_t compStorage;
@@ -95,6 +95,7 @@ struct SRSmaStat {
int64_t refId; // shared by fetch tasks
volatile int64_t nBufItems; // number of items in queue buffer
SRWLatch lock; // r/w lock for rsma fs(e.g. qtaskinfo)
volatile int32_t nFetchAll; // active number of fetch all
int8_t triggerStat; // shared by fetch tasks
int8_t commitStat; // 0 not in committing, 1 in committing
SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w)


@@ -88,7 +88,7 @@ typedef struct {
STqExecTb execTb;
STqExecDb execDb;
};
// int32_t numOfCols; // number of output columns, temporarily used
int32_t numOfCols; // number of output columns, temporarily used
SSchemaWrapper* pSchemaWrapper; // columns that are involved in query
} STqExecHandle;
@@ -615,9 +615,13 @@ int64_t metaGetTbNum(SMeta *pMeta) {
// N.B. Called by statusReq per second
int64_t metaGetTimeSeriesNum(SMeta *pMeta) {
// sum of (number of columns of stable - 1) * number of ctables (excluding timestamp column)
int64_t num = 0;
vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
pMeta->pVnode->config.vndStats.numOfTimeSeries = num;
if (pMeta->pVnode->config.vndStats.numOfTimeSeries <= 0 || ++pMeta->pVnode->config.vndStats.itvTimeSeries % 60 == 0) {
int64_t num = 0;
vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
pMeta->pVnode->config.vndStats.numOfTimeSeries = num;

pMeta->pVnode->config.vndStats.itvTimeSeries = 0;
}

return pMeta->pVnode->config.vndStats.numOfTimeSeries + pMeta->pVnode->config.vndStats.numOfNTimeSeries;
}
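Because `metaGetTimeSeriesNum` is called by `statusReq` once per second, the hunk stops recomputing the count on every call and instead refreshes a cached value only when it is unset or on every 60th call, i.e. roughly once a minute. A self-contained sketch of that interval-refresh cache, with a hypothetical `expensiveCount()` standing in for `vnodeGetTimeSeriesNum`:

```c
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int64_t cached;  // last computed count (<= 0 means never computed)
  int64_t calls;   // calls since the last refresh
} StatCacheSketch;

static int64_t expensiveCount(void) { return 4242; }  // hypothetical full scan

static int64_t cachedCount(StatCacheSketch *s) {
  // Refresh when unset, or on every 60th call; since the caller polls once
  // per second, this amounts to about one recomputation per minute.
  if (s->cached <= 0 || ++s->calls % 60 == 0) {
    s->cached = expensiveCount();
    s->calls = 0;
  }
  return s->cached;
}

int main(void) {
  StatCacheSketch s = {0, 0};
  for (int i = 0; i < 3; ++i) printf("%lld\n", (long long)cachedCount(&s));
  return 0;
}
```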

@@ -890,7 +894,7 @@ const void *metaGetTableTagVal(void *pTag, int16_t type, STagVal *val) {

#ifdef TAG_FILTER_DEBUG
if (IS_VAR_DATA_TYPE(val->type)) {
char* buf = taosMemoryCalloc(val->nData + 1, 1);
char *buf = taosMemoryCalloc(val->nData + 1, 1);
memcpy(buf, val->pData, val->nData);
metaDebug("metaTag table val varchar index:%d cid:%d type:%d value:%s", 1, val->cid, val->type, buf);
taosMemoryFree(buf);

@@ -900,13 +904,13 @@ const void *metaGetTableTagVal(void *pTag, int16_t type, STagVal *val) {
metaDebug("metaTag table val number index:%d cid:%d type:%d value:%f", 1, val->cid, val->type, dval);
}

SArray* pTagVals = NULL;
tTagToValArray((STag*)pTag, &pTagVals);
SArray *pTagVals = NULL;
tTagToValArray((STag *)pTag, &pTagVals);
for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, i);
STagVal *pTagVal = (STagVal *)taosArrayGet(pTagVals, i);

if (IS_VAR_DATA_TYPE(pTagVal->type)) {
char* buf = taosMemoryCalloc(pTagVal->nData + 1, 1);
char *buf = taosMemoryCalloc(pTagVal->nData + 1, 1);
memcpy(buf, pTagVal->pData, pTagVal->nData);
metaDebug("metaTag table varchar index:%d cid:%d type:%d value:%s", i, pTagVal->cid, pTagVal->type, buf);
taosMemoryFree(buf);
@@ -172,7 +172,7 @@ static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) {
TdDirPtr pDir = NULL;
TdDirEntryPtr pDirEntry = NULL;
char dir[TSDB_FILENAME_LEN];
const char *pattern = "v[0-9]+qtaskinfo\\.ver([0-9]+)?$";
const char *pattern = "v[0-9]+qinf\\.v([0-9]+)?$";
regex_t regex;
int code = 0;

@@ -312,15 +312,22 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {

SSmaStat *pStat = SMA_ENV_STAT(pEnv);
SRSmaStat *pRSmaStat = SMA_STAT_RSMA(pStat);
int32_t nLoops = 0;

// step 1: set rsma stat
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 1);
while (atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 1) != 0) {
++nLoops;
if (nLoops > 1000) {
sched_yield();
nLoops = 0;
}
}
pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
ASSERT(pRSmaStat->commitAppliedVer > 0);

// step 2: wait for all triggered fetch tasks to finish
int32_t nLoops = 0;
nLoops = 0;
while (1) {
if (T_REF_VAL_GET(pStat) == 0) {
smaDebug("vgId:%d, rsma commit, fetch tasks are all finished", SMA_VID(pSma));
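The unconditional store into `RSMA_COMMIT_STAT` becomes a compare-and-swap loop: the committer may only move the flag from 0 to 1, so it now waits out any overflow-exec task holding the flag at 2, yielding the CPU every 1000 failed attempts. A compact sketch of that acquire loop, using C11 atomics in place of the internal `atomic_val_compare_exchange_8`:

```c
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic char commitStat = 0;  // 0 idle, 1 committing, 2 exec holds it

/* Spin until the flag moves 0 -> 1, yielding every 1000 failed attempts. */
static void acquireCommitStat(void) {
  int nLoops = 0;
  char expected = 0;
  while (!atomic_compare_exchange_weak(&commitStat, &expected, 1)) {
    expected = 0;  // CAS wrote the observed value; reset before retrying
    if (++nLoops > 1000) {
      sched_yield();  // let the holder (an exec/fetch task) make progress
      nLoops = 0;
    }
  }
}

int main(void) {
  acquireCommitStat();  // uncontended here, so it succeeds immediately
  printf("commitStat=%d\n", (int)atomic_load(&commitStat));  // prints 1
  return 0;
}
```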

@@ -344,7 +351,8 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
return TSDB_CODE_FAILED;
}

smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma), (void*)taosGetSelfPthreadId());
smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma),
(void *)taosGetSelfPthreadId());
nLoops = 0;
while (atomic_load_64(&pRSmaStat->nBufItems) > 0) {
++nLoops;

@@ -359,7 +367,7 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
}
smaInfo("vgId:%d, rsma commit, operator state committed, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());

#if 0 // consuming task of qTaskInfo clone
#if 0  // consuming task of qTaskInfo clone
// step 4: swap queue/qall and iQueue/iQall
// lock
// taosWLockLatch(SMA_ENV_LOCK(pEnv));
@@ -21,17 +21,17 @@
#define RSMA_SUBMIT_BATCH_SIZE (1024) // cnt
#define RSMA_FETCH_DELAY_MAX (900000) // ms
#define RSMA_FETCH_ACTIVE_MAX (1800) // ms
#define RSMA_FETCH_INTERVAL (5000) // ms

SSmaMgmt smaMgmt = {
.inited = 0,
.rsetId = -1,
};

#define TD_QTASKINFO_FNAME_PREFIX "qtaskinfo.ver"
#define TD_RSMAINFO_DEL_FILE "rsmainfo.del"
#define TD_QTASKINFO_FNAME_PREFIX "qinf.v"

typedef struct SRSmaQTaskInfoItem SRSmaQTaskInfoItem;
typedef struct SRSmaQTaskInfoIter SRSmaQTaskInfoIter;
typedef struct SRSmaExecQItem SRSmaExecQItem;

static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid);
static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids);

@@ -82,11 +82,6 @@ struct SRSmaQTaskInfoIter {
int32_t nBufPos;
};

struct SRSmaExecQItem {
void *pRSmaInfo;
void *qall;
};

void tdRSmaQTaskInfoGetFileName(int32_t vgId, int64_t version, char *outputName) {
tdGetVndFileName(vgId, NULL, VNODE_RSMA_DIR, TD_QTASKINFO_FNAME_PREFIX, version, outputName);
}
@@ -1501,13 +1496,13 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
switch (rsmaTriggerStat) {
case TASK_TRIGGER_STAT_PAUSED:
case TASK_TRIGGER_STAT_CANCELLED: {
tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
smaDebug("vgId:%d, rsma fetch task not start for level %" PRIi8 " since stat is %" PRIi8
", rsetId rsetId:%" PRIi64 " refId:%d",
SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaInfo->refId);
if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) {
taosTmrReset(tdRSmaFetchTrigger, 5000, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
}
tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
return;
}
default:

@@ -1518,7 +1513,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
atomic_val_compare_exchange_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE, TASK_TRIGGER_STAT_INACTIVE);
switch (fetchTriggerStat) {
case TASK_TRIGGER_STAT_ACTIVE: {
smaDebug("vgId:%d, rsma fetch task started for level:%" PRIi8 " suid:%" PRIi64 " since stat is active",
smaDebug("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64 " since stat is active",
SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
// async process
pItem->fetchLevel = pItem->level;

@@ -1531,8 +1526,6 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
if (atomic_load_8(&pRSmaInfo->assigned) == 0) {
tsem_post(&(pStat->notEmpty));
}
smaInfo("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64, SMA_VID(pSma), pItem->level,
pRSmaInfo->suid);
} break;
case TASK_TRIGGER_STAT_PAUSED: {
smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is paused",
@@ -1715,15 +1708,30 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
smaDebug("vgId:%d, batchSize:%d, execType:%" PRIi8, SMA_VID(pSma), qallItemSize, type);
}

if (type == RSMA_EXEC_OVERFLOW) {
int8_t oldStat = atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 2);
if (oldStat == 0 ||
((oldStat == 2) && atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat)) < TASK_TRIGGER_STAT_PAUSED)) {
atomic_fetch_add_32(&pRSmaStat->nFetchAll, 1);
tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
if (0 == atomic_sub_fetch_32(&pRSmaStat->nFetchAll, 1)) {
atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 0);
}
}

if (qallItemSize > 0) {
atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize);
continue;
} else if (RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) {
continue;
if (atomic_load_8(RSMA_COMMIT_STAT(pRSmaStat)) == 0) {
continue;
}
for (int32_t j = 0; j < TSDB_RETENTION_L2; ++j) {
SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, j);
if (pItem->fetchLevel) {
pItem->fetchLevel = 0;
taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
}
}
}

break;

@@ -1775,7 +1783,7 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
if (pEnv->flag & SMA_ENV_FLG_CLOSE) {
break;
}

tsem_wait(&pRSmaStat->notEmpty);

if ((pEnv->flag & SMA_ENV_FLG_CLOSE) && (atomic_load_64(&pRSmaStat->nBufItems) <= 0)) {
@@ -596,7 +596,7 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
req.qmsg = NULL;

pHandle->execHandle.task =
qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, NULL,
qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols,
&pHandle->execHandle.pSchemaWrapper);
ASSERT(pHandle->execHandle.task);
void* scanner = NULL;


@@ -110,7 +110,12 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqMetaRsp*
taosArrayPush(pRsp->blockSchema, &pSW);
}
}
tqAddBlockDataToRsp(pDataBlock, pRsp, taosArrayGetSize(pDataBlock->pDataBlock));

if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN){
tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
}else{
tqAddBlockDataToRsp(pDataBlock, pRsp, taosArrayGetSize(pDataBlock->pDataBlock));
}
pRsp->blockNum++;
if (pOffset->type == TMQ_OFFSET__LOG) {
continue;


@@ -260,7 +260,7 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {

handle.execHandle.task = qCreateQueueExecTaskInfo(
handle.execHandle.execCol.qmsg, &reader, NULL, &handle.execHandle.pSchemaWrapper);
handle.execHandle.execCol.qmsg, &reader, &handle.execHandle.numOfCols, &handle.execHandle.pSchemaWrapper);
ASSERT(handle.execHandle.task);
void* scanner = NULL;
qExtractStreamScanner(handle.execHandle.task, &scanner);
@@ -471,6 +471,7 @@ static int32_t setCreateTBResultIntoDataBlock(SSDataBlock* pBlock, SDbCfgInfo* p
len += sprintf(buf2 + VARSTR_HEADER_SIZE, "CREATE TABLE `%s` (", tbName);
appendColumnFields(buf2, &len, pCfg);
len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, ")");
appendTableOptions(buf2, &len, pDbCfg, pCfg);
}

varDataLen(buf2) = len;
@@ -303,6 +303,7 @@ typedef struct SAggSupporter {
char* keyBuf; // window key buffer
SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
int32_t currentPageId; // current write page id
} SAggSupporter;

typedef struct {

@@ -327,7 +328,6 @@ typedef struct STableScanInfo {
SQueryTableDataCond cond;
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
int32_t dataBlockLoadFlag;
// SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded.
SSampleExecInfo sample; // sample execution info
int32_t currentGroupId;
int32_t currentTable;

@@ -431,6 +431,7 @@ typedef struct SStreamAggSupporter {
char* pKeyBuf; // window key buffer
SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
int32_t currentPageId; // buffer page that is active
SSDataBlock* pScanBlock;
} SStreamAggSupporter;


@@ -1009,7 +1010,7 @@ int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimary
int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput,
int32_t size);
SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize);
SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize);
SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex);
SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
@@ -46,8 +46,8 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
rowSize += pCtx[i].resDataInfo.interBufSize;
}

rowSize +=
(numOfOutput * sizeof(bool)); // expand rowSize to mark if col is null for top/bottom result(doSaveTupleData)
rowSize += (numOfOutput * sizeof(bool));
// expand rowSize to mark if col is null for top/bottom result(saveTupleData)
return rowSize;
}


@@ -1178,7 +1178,6 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
SqlFunctionCtx* pCtx = &pFuncCtx[i];

pCtx->functionId = -1;
pCtx->curBufPage = -1;
pCtx->pExpr = pExpr;

if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) {

@@ -1222,6 +1221,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
pCtx->isStream = false;

pCtx->param = pFunct->pParam;
pCtx->saveHandle.currentPage = -1;
}

for (int32_t i = 1; i < numOfOutput; ++i) {
@@ -177,13 +177,13 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* n

// extract the number of output columns
SDataBlockDescNode* pDescNode = pPlan->pNode->pOutputDataBlockDesc;
if(numOfCols) *numOfCols = 0;
*numOfCols = 0;

SNode* pNode;
FOREACH(pNode, pDescNode->pSlots) {
SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode;
if (pSlotDesc->output) {
if(numOfCols) ++(*numOfCols);
++(*numOfCols);
}
}