diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index 4da2d80fbb..37dd448b87 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG c4a567b
+ GIT_TAG 6a2d9fc
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/docs/en/01-index.md b/docs/en/01-index.md
index 13552ea9dc..296cd96898 100644
--- a/docs/en/01-index.md
+++ b/docs/en/01-index.md
@@ -1,6 +1,7 @@
---
title: TDengine Documentation
sidebar_label: Documentation Home
+description: This website contains the user manuals for TDengine, an open-source, cloud-native time-series database optimized for IoT, Connected Cars, and Industrial IoT.
slug: /
---
diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md
index a60bfab2cc..95dd4324f1 100644
--- a/docs/en/02-intro/index.md
+++ b/docs/en/02-intro/index.md
@@ -1,5 +1,6 @@
---
title: Introduction
+description: This document introduces the major features, competitive advantages, typical use cases, and benchmarks of TDengine.
toc_max_heading_level: 2
---
diff --git a/docs/en/04-concept/index.md b/docs/en/04-concept/index.md
index 0b1b226c17..771a061c31 100644
--- a/docs/en/04-concept/index.md
+++ b/docs/en/04-concept/index.md
@@ -1,5 +1,6 @@
---
title: Concepts
+description: This document describes the basic concepts of TDengine, including the supertable.
---
In order to explain the basic concepts and provide some sample code, the TDengine documentation uses smart meters as a typical time-series use case. We assume the following: 1. Each smart meter collects three metrics, i.e. current, voltage, and phase; 2. There are multiple smart meters; 3. Each meter has static attributes like location and group ID. Based on this, collected data will look similar to the following table:
diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md
index ac273daba4..42e6861674 100644
--- a/docs/en/05-get-started/01-docker.md
+++ b/docs/en/05-get-started/01-docker.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Docker
title: Quick Install on Docker
+sidebar_label: Docker
+description: This document describes how to install TDengine in a Docker container and perform queries and inserts.
---
This document describes how to install TDengine in a Docker container and perform queries and inserts.
diff --git a/docs/en/05-get-started/03-package.md b/docs/en/05-get-started/03-package.md
index 54d2e046c2..a0c1d93983 100644
--- a/docs/en/05-get-started/03-package.md
+++ b/docs/en/05-get-started/03-package.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Package
title: Quick Install from Package
+sidebar_label: Package
+description: This document describes how to install TDengine on Linux, Windows, and macOS and perform queries and inserts.
---
import Tabs from "@theme/Tabs";
diff --git a/docs/en/05-get-started/index.md b/docs/en/05-get-started/index.md
index 12cfa22c69..66573a89cd 100644
--- a/docs/en/05-get-started/index.md
+++ b/docs/en/05-get-started/index.md
@@ -1,6 +1,6 @@
---
title: Get Started
-description: This article describes how to install TDengine and test its performance.
+description: This document describes how to install TDengine on various platforms.
---
import GitHubSVG from './github.svg'
diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md
index 45bbaa2751..913c24f189 100644
--- a/docs/en/07-develop/01-connect/index.md
+++ b/docs/en/07-develop/01-connect/index.md
@@ -1,7 +1,7 @@
---
-sidebar_label: Connect
title: Connect to TDengine
-description: "How to establish connections to TDengine and how to install and use TDengine connectors."
+sidebar_label: Connect
+description: This document describes how to establish connections to TDengine and how to install and use TDengine connectors.
---
import Tabs from "@theme/Tabs";
diff --git a/docs/en/07-develop/02-model/index.mdx b/docs/en/07-develop/02-model/index.mdx
index 19a239805f..db5a259cfe 100644
--- a/docs/en/07-develop/02-model/index.mdx
+++ b/docs/en/07-develop/02-model/index.mdx
@@ -1,5 +1,6 @@
---
title: Data Model
+description: This document describes the data model of TDengine.
---
The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](/concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
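As a minimal sketch of that big picture, the smart meters example maps onto a database and an STable roughly as follows (the database name and schema below are illustrative):

```sql
-- Create a database and a supertable (STable) for the smart meters example.
CREATE DATABASE power;
USE power;
-- Collected metrics go in columns; static attributes go in tags.
CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT)
  TAGS (location BINARY(64), group_id INT);
```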
diff --git a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx
index f2168645ff..32ba53a0cb 100644
--- a/docs/en/07-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs/en/07-develop/03-insert-data/01-sql-writing.mdx
@@ -1,5 +1,6 @@
---
title: Insert Using SQL
+description: This document describes how to insert data into TDengine using SQL.
---
import Tabs from "@theme/Tabs";
diff --git a/docs/en/07-develop/03-insert-data/20-kafka-writting.mdx b/docs/en/07-develop/03-insert-data/20-kafka-writting.mdx
index ffb969a8a6..89ca10b669 100644
--- a/docs/en/07-develop/03-insert-data/20-kafka-writting.mdx
+++ b/docs/en/07-develop/03-insert-data/20-kafka-writting.mdx
@@ -1,5 +1,6 @@
---
title: Write from Kafka
+description: This document describes how to insert data into TDengine using Kafka.
---
import Tabs from "@theme/Tabs";
diff --git a/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx b/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx
index fc5644850c..c559883d26 100644
--- a/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx
+++ b/docs/en/07-develop/03-insert-data/30-influxdb-line.mdx
@@ -1,6 +1,7 @@
---
-sidebar_label: InfluxDB Line Protocol
title: InfluxDB Line Protocol
+sidebar_label: InfluxDB Line Protocol
+description: This document describes how to insert data into TDengine using the InfluxDB Line Protocol.
---
import Tabs from "@theme/Tabs";
diff --git a/docs/en/07-develop/03-insert-data/40-opentsdb-telnet.mdx b/docs/en/07-develop/03-insert-data/40-opentsdb-telnet.mdx
index 5d3f25dca9..30bc3b87bf 100644
--- a/docs/en/07-develop/03-insert-data/40-opentsdb-telnet.mdx
+++ b/docs/en/07-develop/03-insert-data/40-opentsdb-telnet.mdx
@@ -1,6 +1,7 @@
---
-sidebar_label: OpenTSDB Line Protocol
title: OpenTSDB Line Protocol
+sidebar_label: OpenTSDB Line Protocol
+description: This document describes how to insert data into TDengine using the OpenTSDB Line Protocol.
---
import Tabs from "@theme/Tabs";
diff --git a/docs/en/07-develop/03-insert-data/50-opentsdb-json.mdx b/docs/en/07-develop/03-insert-data/50-opentsdb-json.mdx
index 214580c179..e9db130241 100644
--- a/docs/en/07-develop/03-insert-data/50-opentsdb-json.mdx
+++ b/docs/en/07-develop/03-insert-data/50-opentsdb-json.mdx
@@ -1,6 +1,7 @@
---
-sidebar_label: OpenTSDB JSON Protocol
title: OpenTSDB JSON Protocol
+sidebar_label: OpenTSDB JSON Protocol
+description: This document describes how to insert data into TDengine using the OpenTSDB JSON protocol.
---
import Tabs from "@theme/Tabs";
diff --git a/docs/en/07-develop/03-insert-data/60-high-volume.md b/docs/en/07-develop/03-insert-data/60-high-volume.md
index 272a138813..d5afa8ef6e 100644
--- a/docs/en/07-develop/03-insert-data/60-high-volume.md
+++ b/docs/en/07-develop/03-insert-data/60-high-volume.md
@@ -1,6 +1,7 @@
---
-sidebar_label: High Performance Writing
title: High Performance Writing
+sidebar_label: High Performance Writing
+description: This document describes how to achieve high performance when writing data into TDengine.
---
import Tabs from "@theme/Tabs";
diff --git a/docs/en/07-develop/03-insert-data/index.md b/docs/en/07-develop/03-insert-data/index.md
index 1a71e719a5..15f8f4ee9e 100644
--- a/docs/en/07-develop/03-insert-data/index.md
+++ b/docs/en/07-develop/03-insert-data/index.md
@@ -1,5 +1,6 @@
---
title: Insert Data
+description: This document describes how to insert data into TDengine.
---
TDengine supports multiple protocols for inserting data, including SQL, InfluxDB Line protocol, OpenTSDB Telnet protocol, and OpenTSDB JSON protocol. Data can be inserted row by row or in batches, from one or more collection points simultaneously, and with multiple threads; out-of-order data and historical data can be inserted as well. InfluxDB Line protocol, OpenTSDB Telnet protocol, and OpenTSDB JSON protocol are the three schemaless insert protocols supported by TDengine. With schemaless protocols, it is not necessary to create STables and tables in advance, and the schemas can be adjusted automatically based on the data being inserted.
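For example, a minimal sketch of row-by-row SQL insertion into the smart meters model (the subtable name and tag values are illustrative):

```sql
-- Insert one row into subtable d1001, creating it from the meters STable if needed.
INSERT INTO d1001 USING meters TAGS ('California.SanFrancisco', 2)
  VALUES (NOW, 10.3, 219, 0.31);
```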
diff --git a/docs/en/07-develop/04-query-data/index.mdx b/docs/en/07-develop/04-query-data/index.mdx
index 38dc98d1ff..7e167bb4f3 100644
--- a/docs/en/07-develop/04-query-data/index.mdx
+++ b/docs/en/07-develop/04-query-data/index.mdx
@@ -1,6 +1,6 @@
---
title: Query Data
-description: "This chapter introduces major query functionalities and how to perform sync and async query using connectors."
+description: This document describes how to query data in TDengine and how to perform synchronous and asynchronous queries using connectors.
---
import Tabs from "@theme/Tabs";
diff --git a/docs/en/07-develop/06-stream.md b/docs/en/07-develop/06-stream.md
index 36f903ee9a..125173e60b 100644
--- a/docs/en/07-develop/06-stream.md
+++ b/docs/en/07-develop/06-stream.md
@@ -1,7 +1,7 @@
---
-sidebar_label: Stream Processing
-description: "The TDengine stream processing engine combines data inserts, preprocessing, analytics, real-time computation, and alerting into a single component."
title: Stream Processing
+sidebar_label: Stream Processing
+description: This document describes the stream processing component of TDengine.
---
Raw time-series data is often cleaned and preprocessed before being permanently stored in a database. In a traditional time-series solution, this generally requires the deployment of stream processing systems such as Kafka or Flink. However, the complexity of such systems increases the cost of development and maintenance.
diff --git a/docs/en/07-develop/08-cache.md b/docs/en/07-develop/08-cache.md
index 82a4787016..6a6ca3e594 100644
--- a/docs/en/07-develop/08-cache.md
+++ b/docs/en/07-develop/08-cache.md
@@ -1,7 +1,7 @@
---
-sidebar_label: Caching
title: Caching
-description: "This document describes the caching component of TDengine."
+sidebar_label: Caching
+description: This document describes the caching component of TDengine.
---
TDengine uses various kinds of caching techniques to efficiently write and query data. This document describes the caching component of TDengine.
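One such technique is the last-row cache, which can be enabled per database. A hedged sketch, with illustrative database and table names:

```sql
-- Cache the most recent row of each table and read it back efficiently.
CREATE DATABASE power CACHEMODEL 'last_row';
SELECT LAST_ROW(*) FROM power.meters;
```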
diff --git a/docs/en/07-develop/09-udf.md b/docs/en/07-develop/09-udf.md
index 699b3ebe5f..553a7b932b 100644
--- a/docs/en/07-develop/09-udf.md
+++ b/docs/en/07-develop/09-udf.md
@@ -1,7 +1,7 @@
---
-sidebar_label: UDF
title: User-Defined Functions (UDF)
-description: "You can define your own scalar and aggregate functions to expand the query capabilities of TDengine."
+sidebar_label: UDF
+description: This document describes how to create user-defined functions (UDF), your own scalar and aggregate functions that can expand the query capabilities of TDengine.
---
The built-in functions of TDengine may not be sufficient for the use cases of every application. In this case, you can define custom functions for use in TDengine queries. These are known as user-defined functions (UDF). A user-defined function takes one column of data or the result of a subquery as its input.
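As a hedged sketch, scalar and aggregate UDFs compiled into shared libraries are registered with SQL statements like the following (function names and library paths are illustrative):

```sql
-- Register a scalar UDF and an aggregate UDF from shared libraries.
CREATE FUNCTION bit_and AS '/usr/local/taos/udf/libbitand.so' OUTPUTTYPE INT;
CREATE AGGREGATE FUNCTION l2norm AS '/usr/local/taos/udf/libl2norm.so'
  OUTPUTTYPE DOUBLE BUFSIZE 8;
```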
diff --git a/docs/en/07-develop/index.md b/docs/en/07-develop/index.md
index 34649d32a2..8f80b82b97 100644
--- a/docs/en/07-develop/index.md
+++ b/docs/en/07-develop/index.md
@@ -1,5 +1,6 @@
---
title: Developer Guide
+description: This document describes how to use the various components of TDengine from a developer's perspective.
---
Before creating an application to process time-series data with TDengine, consider the following:
diff --git a/docs/en/10-deployment/01-deploy.md b/docs/en/10-deployment/01-deploy.md
index 5dfcd3108d..da00e21a7e 100644
--- a/docs/en/10-deployment/01-deploy.md
+++ b/docs/en/10-deployment/01-deploy.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Manual Deployment
title: Manual Deployment and Management
+sidebar_label: Manual Deployment
+description: This document describes how to deploy TDengine on a server.
---
## Prerequisites
diff --git a/docs/en/10-deployment/03-k8s.md b/docs/en/10-deployment/03-k8s.md
index b0aa677713..49e61caafc 100644
--- a/docs/en/10-deployment/03-k8s.md
+++ b/docs/en/10-deployment/03-k8s.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Kubernetes
title: Deploying a TDengine Cluster in Kubernetes
+sidebar_label: Kubernetes
+description: This document describes how to deploy TDengine on Kubernetes.
---
TDengine is a cloud-native time-series database that can be deployed on Kubernetes. This document gives a step-by-step description of how you can use YAML files to create a TDengine cluster and introduces common operations for TDengine in a Kubernetes environment.
diff --git a/docs/en/10-deployment/05-helm.md b/docs/en/10-deployment/05-helm.md
index 90baa5f445..aa61717669 100644
--- a/docs/en/10-deployment/05-helm.md
+++ b/docs/en/10-deployment/05-helm.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Helm
title: Use Helm to deploy TDengine
+sidebar_label: Helm
+description: This document describes how to deploy TDengine on Kubernetes by using Helm.
---
Helm is a package manager for Kubernetes that provides additional capabilities for deploying applications on Kubernetes.
diff --git a/docs/en/10-deployment/index.md b/docs/en/10-deployment/index.md
index 7054a33e4a..865fbc2da5 100644
--- a/docs/en/10-deployment/index.md
+++ b/docs/en/10-deployment/index.md
@@ -1,5 +1,6 @@
---
title: Deployment
+description: This document describes how to deploy a TDengine cluster on a server, on Kubernetes, and by using Helm.
---
TDengine has a native distributed design and provides the ability to scale out. A few nodes can form a TDengine cluster. If you need higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source.
diff --git a/docs/en/12-taos-sql/01-data-type.md b/docs/en/12-taos-sql/01-data-type.md
index 60046629a4..641fd3cbb7 100644
--- a/docs/en/12-taos-sql/01-data-type.md
+++ b/docs/en/12-taos-sql/01-data-type.md
@@ -1,7 +1,7 @@
---
-sidebar_label: Data Types
title: Data Types
-description: 'TDengine supports a variety of data types including timestamp, float, JSON and many others.'
+sidebar_label: Data Types
+description: This document describes the data types that TDengine supports.
---
## Timestamp
diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md
index 059f124ea5..280d72697c 100644
--- a/docs/en/12-taos-sql/02-database.md
+++ b/docs/en/12-taos-sql/02-database.md
@@ -1,7 +1,7 @@
---
-sidebar_label: Database
title: Database
-description: "create and drop database, show or change database parameters"
+sidebar_label: Database
+description: This document describes how to create and perform operations on databases.
---
## Create a Database
diff --git a/docs/en/12-taos-sql/03-table.md b/docs/en/12-taos-sql/03-table.md
index 6d12b8c730..f61d1f5147 100644
--- a/docs/en/12-taos-sql/03-table.md
+++ b/docs/en/12-taos-sql/03-table.md
@@ -1,5 +1,6 @@
---
title: Table
+description: This document describes how to create and perform operations on standard tables and subtables.
---
## Create Table
diff --git a/docs/en/12-taos-sql/04-stable.md b/docs/en/12-taos-sql/04-stable.md
index 8a7c713f8c..5b316d0d24 100644
--- a/docs/en/12-taos-sql/04-stable.md
+++ b/docs/en/12-taos-sql/04-stable.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Supertable
title: Supertable
+sidebar_label: Supertable
+description: This document describes how to create and perform operations on supertables.
---
## Create a Supertable
diff --git a/docs/en/12-taos-sql/05-insert.md b/docs/en/12-taos-sql/05-insert.md
index ca3154c25e..c22357abfa 100644
--- a/docs/en/12-taos-sql/05-insert.md
+++ b/docs/en/12-taos-sql/05-insert.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Insert
title: Insert
+sidebar_label: Insert
+description: This document describes how to insert data into TDengine.
---
## Syntax
diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md
index ee06a7be2d..183ab58726 100644
--- a/docs/en/12-taos-sql/06-select.md
+++ b/docs/en/12-taos-sql/06-select.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Select
title: Select
+sidebar_label: Select
+description: This document describes how to query data in TDengine.
---
## Syntax
diff --git a/docs/en/12-taos-sql/08-delete-data.mdx b/docs/en/12-taos-sql/08-delete-data.mdx
index 999c467ad0..f91a89a7eb 100644
--- a/docs/en/12-taos-sql/08-delete-data.mdx
+++ b/docs/en/12-taos-sql/08-delete-data.mdx
@@ -1,7 +1,7 @@
---
-sidebar_label: Delete Data
-description: "Delete data from table or Stable"
title: Delete Data
+sidebar_label: Delete Data
+description: This document describes how to delete data from TDengine.
---
TDengine provides the functionality of deleting data from a table or STable according to a specified time range. This can be used to clean up abnormal data generated due to device failure.
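A minimal sketch of such a deletion (table name and time range are illustrative):

```sql
-- Remove abnormal data written during a known device failure window.
DELETE FROM meters
  WHERE ts >= '2021-10-01 10:40:00' AND ts < '2021-10-01 10:50:00';
```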
diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md
index 802eb259bf..b2d44f1a1d 100644
--- a/docs/en/12-taos-sql/10-function.md
+++ b/docs/en/12-taos-sql/10-function.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Functions
title: Functions
+sidebar_label: Functions
+description: This document describes the standard SQL functions available in TDengine.
toc_max_heading_level: 4
---
diff --git a/docs/en/12-taos-sql/12-distinguished.md b/docs/en/12-taos-sql/12-distinguished.md
index 0763e85a53..536fd8ffc3 100644
--- a/docs/en/12-taos-sql/12-distinguished.md
+++ b/docs/en/12-taos-sql/12-distinguished.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Time-Series Extensions
title: Time-Series Extensions
+sidebar_label: Time-Series Extensions
+description: This document describes the extended functions specific to time-series data processing available in TDengine.
---
As a purpose-built database for storing and processing time-series data, TDengine provides time-series-specific extensions to standard SQL.
diff --git a/docs/en/12-taos-sql/13-tmq.md b/docs/en/12-taos-sql/13-tmq.md
index befab4f4f0..1a805c76fb 100644
--- a/docs/en/12-taos-sql/13-tmq.md
+++ b/docs/en/12-taos-sql/13-tmq.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Data Subscription
title: Data Subscription
+sidebar_label: Data Subscription
+description: This document describes the SQL statements related to the data subscription component of TDengine.
---
The information in this document is related to the TDengine data subscription feature.
diff --git a/docs/en/12-taos-sql/14-stream.md b/docs/en/12-taos-sql/14-stream.md
index e70e962668..b8f6c3a163 100644
--- a/docs/en/12-taos-sql/14-stream.md
+++ b/docs/en/12-taos-sql/14-stream.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Stream Processing
title: Stream Processing
+sidebar_label: Stream Processing
+description: This document describes the SQL statements related to the stream processing component of TDengine.
---
Raw time-series data is often cleaned and preprocessed before being permanently stored in a database. Stream processing components like Kafka, Flink, and Spark are often deployed alongside a time-series database to handle these operations, increasing system complexity and maintenance costs.
diff --git a/docs/en/12-taos-sql/16-operators.md b/docs/en/12-taos-sql/16-operators.md
index c426e28793..32ad4e7075 100644
--- a/docs/en/12-taos-sql/16-operators.md
+++ b/docs/en/12-taos-sql/16-operators.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Operators
title: Operators
+sidebar_label: Operators
+description: This document describes the SQL operators available in TDengine.
---
## Arithmetic Operators
diff --git a/docs/en/12-taos-sql/17-json.md b/docs/en/12-taos-sql/17-json.md
index 77f7743033..b2494e0cc1 100644
--- a/docs/en/12-taos-sql/17-json.md
+++ b/docs/en/12-taos-sql/17-json.md
@@ -1,6 +1,7 @@
---
-sidebar_label: JSON Type
title: JSON Type
+sidebar_label: JSON Type
+description: This document describes the JSON data type in TDengine.
---
diff --git a/docs/en/12-taos-sql/18-escape.md b/docs/en/12-taos-sql/18-escape.md
index a2ae40de98..85e4610e44 100644
--- a/docs/en/12-taos-sql/18-escape.md
+++ b/docs/en/12-taos-sql/18-escape.md
@@ -1,5 +1,6 @@
---
title: Escape Characters
+description: This document describes the usage of escape characters in TDengine.
---
## Escape Characters
diff --git a/docs/en/12-taos-sql/19-limit.md b/docs/en/12-taos-sql/19-limit.md
index f00ec90f57..654fae7560 100644
--- a/docs/en/12-taos-sql/19-limit.md
+++ b/docs/en/12-taos-sql/19-limit.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Name and Size Limits
title: Name and Size Limits
+sidebar_label: Name and Size Limits
+description: This document describes the name and size limits in TDengine.
---
## Naming Rules
diff --git a/docs/en/12-taos-sql/20-keywords.md b/docs/en/12-taos-sql/20-keywords.md
index 23f85947e3..a2191c87ee 100644
--- a/docs/en/12-taos-sql/20-keywords.md
+++ b/docs/en/12-taos-sql/20-keywords.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Reserved Keywords
title: Reserved Keywords
+sidebar_label: Reserved Keywords
+description: This document describes the reserved keywords in TDengine that cannot be used in object names.
---
## Keyword List
diff --git a/docs/en/12-taos-sql/21-node.md b/docs/en/12-taos-sql/21-node.md
index a0d49ab208..8a5069e66f 100644
--- a/docs/en/12-taos-sql/21-node.md
+++ b/docs/en/12-taos-sql/21-node.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Cluster
title: Cluster
+sidebar_label: Cluster
+description: This document describes the SQL statements related to cluster management in TDengine.
---
The physical entities that form TDengine clusters are known as data nodes (dnodes). Each dnode is a process running on the operating system of the physical machine. Dnodes can contain virtual nodes (vnodes), which store time-series data. Virtual nodes are formed into vgroups, which have 1 or 3 vnodes depending on the replica setting. If you want to enable replication on your cluster, it must contain at least three nodes. Dnodes can also contain management nodes (mnodes). Each cluster has up to three mnodes. Finally, dnodes can contain query nodes (qnodes), which compute time-series data, thus separating compute from storage. A single dnode can contain a vnode, qnode, and mnode.
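A hedged sketch of the corresponding management statements (the FQDN and dnode ID are illustrative):

```sql
-- Add a dnode to the cluster, then place an mnode and a qnode on it.
CREATE DNODE "node2.example.com:6030";
SHOW DNODES;
CREATE MNODE ON DNODE 2;
CREATE QNODE ON DNODE 2;
```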
diff --git a/docs/en/12-taos-sql/22-meta.md b/docs/en/12-taos-sql/22-meta.md
index 1cd759742a..d2bc72f047 100644
--- a/docs/en/12-taos-sql/22-meta.md
+++ b/docs/en/12-taos-sql/22-meta.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Metadata
title: Information_Schema Database
+sidebar_label: Metadata
+description: This document describes how to use the INFORMATION_SCHEMA database in TDengine.
---
TDengine includes a built-in database named `INFORMATION_SCHEMA` to provide access to database metadata, system information, and status information. This information includes database names, table names, and currently running SQL statements. All information related to TDengine maintenance is stored in this database. It contains several read-only tables. These tables are more accurately described as views, and they do not correspond to specific files. You can query these tables but cannot write data to them. The INFORMATION_SCHEMA database is intended to provide a unified method for SHOW commands to access data. However, using SELECT ... FROM INFORMATION_SCHEMA.tablename offers several advantages over SHOW commands:
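For example, a minimal sketch (assuming the 3.0 `ins_tables` view and an illustrative `power` database) of a query that SHOW commands cannot express:

```sql
-- Filter and project table metadata, which SHOW TABLES cannot do.
SELECT table_name, create_time
  FROM information_schema.ins_tables
  WHERE db_name = 'power';
```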
diff --git a/docs/en/12-taos-sql/23-perf.md b/docs/en/12-taos-sql/23-perf.md
index 29cf3af6ab..fc369ec663 100644
--- a/docs/en/12-taos-sql/23-perf.md
+++ b/docs/en/12-taos-sql/23-perf.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Statistics
title: Performance_Schema Database
+sidebar_label: Statistics
+description: This document describes how to use the PERFORMANCE_SCHEMA database in TDengine.
---
TDengine includes a built-in database named `PERFORMANCE_SCHEMA` to provide access to database performance statistics. This document introduces the tables of PERFORMANCE_SCHEMA and their structure.
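As a minimal sketch (assuming the `perf_queries` table), performance statistics can be queried like any other table:

```sql
-- Inspect currently running queries.
SELECT * FROM performance_schema.perf_queries;
```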
diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md
index f70d86570e..dc1db956a0 100644
--- a/docs/en/12-taos-sql/24-show.md
+++ b/docs/en/12-taos-sql/24-show.md
@@ -1,6 +1,7 @@
---
-sidebar_label: SHOW Statement
title: SHOW Statement for Metadata
+sidebar_label: SHOW Statement
+description: This document describes how to use the SHOW statement in TDengine.
---
The `SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `SELECT` to query the tables in the `INFORMATION_SCHEMA` database.
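For example, a sketch of the two approaches side by side (the `ins_databases` view is assumed):

```sql
SHOW DATABASES;
-- Roughly equivalent, but the result can be filtered and joined:
SELECT name FROM information_schema.ins_databases;
```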
diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md
index f895567c62..8b4c439352 100644
--- a/docs/en/12-taos-sql/25-grant.md
+++ b/docs/en/12-taos-sql/25-grant.md
@@ -1,7 +1,7 @@
---
-sidebar_label: Access Control
title: User and Access Control
-description: Manage user and user's permission
+sidebar_label: Access Control
+description: This document describes how to manage users and permissions in TDengine.
---
This document describes how to manage permissions in TDengine.
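A hedged sketch of the basic workflow (user name, password, and database are illustrative):

```sql
-- Create a user, grant read access to one database, then revoke it.
CREATE USER analyst PASS 'Str0ngPa55';
GRANT READ ON power.* TO analyst;
REVOKE READ ON power.* FROM analyst;
```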
diff --git a/docs/en/12-taos-sql/26-udf.md b/docs/en/12-taos-sql/26-udf.md
index 977f3bcc08..249fcd3b54 100644
--- a/docs/en/12-taos-sql/26-udf.md
+++ b/docs/en/12-taos-sql/26-udf.md
@@ -1,6 +1,7 @@
---
-sidebar_label: User-Defined Functions
title: User-Defined Functions (UDF)
+sidebar_label: User-Defined Functions
+description: This document describes the SQL statements related to user-defined functions (UDF) in TDengine.
---
You can create user-defined functions and import them into TDengine.
diff --git a/docs/en/12-taos-sql/27-index.md b/docs/en/12-taos-sql/27-index.md
index 7d09bc43ab..7586e4af76 100644
--- a/docs/en/12-taos-sql/27-index.md
+++ b/docs/en/12-taos-sql/27-index.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Index
-title: Using Indices
+title: Indexing
+sidebar_label: Indexing
+description: This document describes the SQL statements related to indexing in TDengine.
---
TDengine supports SMA and FULLTEXT indexing.
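A hedged sketch of creating an SMA index on a supertable (index, table, and column names are illustrative):

```sql
-- Precompute hourly MAX/MIN of current to accelerate matching queries.
CREATE SMA INDEX idx_current ON meters
  FUNCTION(MAX(current), MIN(current)) INTERVAL(1h);
```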
diff --git a/docs/en/12-taos-sql/28-recovery.md b/docs/en/12-taos-sql/28-recovery.md
index e869ffc45f..b4da25ea0c 100644
--- a/docs/en/12-taos-sql/28-recovery.md
+++ b/docs/en/12-taos-sql/28-recovery.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Error Recovery
title: Error Recovery
+sidebar_label: Error Recovery
+description: This document describes the SQL statements related to error recovery in TDengine.
---
In a complex environment, connections and query tasks may encounter errors or fail to return in a reasonable time. If this occurs, you can terminate the connection or task.
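A minimal sketch of terminating a connection or query (the IDs shown are illustrative and come from the corresponding SHOW output):

```sql
SHOW CONNECTIONS;
KILL CONNECTION 1;      -- conn_id from SHOW CONNECTIONS
SHOW QUERIES;
KILL QUERY '1:0.6';     -- kill_id from SHOW QUERIES
```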
diff --git a/docs/en/12-taos-sql/29-changes.md b/docs/en/12-taos-sql/29-changes.md
index f288cd7545..341791d675 100644
--- a/docs/en/12-taos-sql/29-changes.md
+++ b/docs/en/12-taos-sql/29-changes.md
@@ -1,7 +1,7 @@
---
-sidebar_label: Changes in TDengine 3.0
title: Changes in TDengine 3.0
-description: "This document explains how TDengine SQL has changed in version 3.0."
+sidebar_label: Changes in TDengine 3.0
+description: This document describes how TDengine SQL has changed in version 3.0 compared with previous versions.
---
## Basic SQL Elements
diff --git a/docs/en/12-taos-sql/index.md b/docs/en/12-taos-sql/index.md
index a5ffc9dc8d..276f84f21b 100644
--- a/docs/en/12-taos-sql/index.md
+++ b/docs/en/12-taos-sql/index.md
@@ -1,6 +1,6 @@
---
title: TDengine SQL
-description: 'The syntax supported by TDengine SQL '
+description: This document describes the syntax and functions supported by TDengine SQL.
---
This section explains how to use SQL to perform operations on databases, tables, and STables, insert data, select data, and use functions. We also provide some tips for using TDengine SQL. If you have previous experience with SQL, this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. TDengine SQL has been enhanced in version 3.0, and the query engine has been rearchitected. For information about how TDengine SQL has changed, see [Changes in TDengine 3.0](../taos-sql/changes).
diff --git a/docs/en/13-operation/01-pkg-install.md b/docs/en/13-operation/01-pkg-install.md
index d7713b943f..6e6c4aaebf 100644
--- a/docs/en/13-operation/01-pkg-install.md
+++ b/docs/en/13-operation/01-pkg-install.md
@@ -1,6 +1,6 @@
---
title: Install and Uninstall
-description: Install, Uninstall, Start, Stop and Upgrade
+description: This document describes how to install, upgrade, and uninstall TDengine.
---
import Tabs from "@theme/Tabs";
diff --git a/docs/en/13-operation/02-planning.mdx b/docs/en/13-operation/02-planning.mdx
index 2dffa7bb87..37ef6aae26 100644
--- a/docs/en/13-operation/02-planning.mdx
+++ b/docs/en/13-operation/02-planning.mdx
@@ -1,6 +1,7 @@
---
-sidebar_label: Resource Planning
title: Resource Planning
+sidebar_label: Resource Planning
+description: This document describes how to plan compute and storage resources for your TDengine cluster.
---
It is important to plan computing and storage resources when using TDengine to build an IoT, time-series, or big data platform. This chapter describes how to plan the CPU, memory, and disk resources required.
diff --git a/docs/en/13-operation/03-tolerance.md b/docs/en/13-operation/03-tolerance.md
index 21a5a90282..4f33748e68 100644
--- a/docs/en/13-operation/03-tolerance.md
+++ b/docs/en/13-operation/03-tolerance.md
@@ -1,5 +1,6 @@
---
title: Fault Tolerance and Disaster Recovery
+description: This document describes how TDengine provides fault tolerance and disaster recovery.
---
## Fault Tolerance
diff --git a/docs/en/13-operation/07-import.md b/docs/en/13-operation/07-import.md
index 8362cec1ab..e95824e927 100644
--- a/docs/en/13-operation/07-import.md
+++ b/docs/en/13-operation/07-import.md
@@ -1,5 +1,6 @@
---
title: Data Import
+description: This document describes how to import data into TDengine.
---
TDengine provides multiple ways of importing data: import with a script, import from a data file, and import using `taosdump`.
diff --git a/docs/en/13-operation/08-export.md b/docs/en/13-operation/08-export.md
index 5780de42fa..bffda36e23 100644
--- a/docs/en/13-operation/08-export.md
+++ b/docs/en/13-operation/08-export.md
@@ -1,5 +1,6 @@
---
title: Data Export
+description: This document describes how to export data from TDengine.
---
There are two ways of exporting data from a TDengine cluster:
diff --git a/docs/en/13-operation/10-monitor.md b/docs/en/13-operation/10-monitor.md
index 74a5564a2a..346b874059 100644
--- a/docs/en/13-operation/10-monitor.md
+++ b/docs/en/13-operation/10-monitor.md
@@ -1,5 +1,6 @@
---
title: TDengine Monitoring
+description: This document describes how to monitor your TDengine cluster.
---
After TDengine is started, it automatically writes monitoring data, including CPU, memory, and disk usage, bandwidth, number of requests, disk I/O speed, and slow queries, into a designated database at a predefined interval through taosKeeper. Additionally, some important system operations, like logon, create user, and drop database, as well as alerts and warnings generated in TDengine, are written into the `log` database. A system operator can view the data in the `log` database from the TDengine CLI or from a web console.
diff --git a/docs/en/13-operation/17-diagnose.md b/docs/en/13-operation/17-diagnose.md
index fa202a23ea..9d42b3ebbc 100644
--- a/docs/en/13-operation/17-diagnose.md
+++ b/docs/en/13-operation/17-diagnose.md
@@ -1,5 +1,6 @@
---
title: Problem Diagnostics
+description: This document describes how to diagnose issues with your TDengine cluster.
---
## Network Connection Diagnostics
diff --git a/docs/en/13-operation/index.md b/docs/en/13-operation/index.md
index c64749c40e..8b386dc19a 100644
--- a/docs/en/13-operation/index.md
+++ b/docs/en/13-operation/index.md
@@ -1,5 +1,6 @@
---
title: Administration
+description: This document describes how to perform management operations on your TDengine cluster from an administrator's perspective.
---
This chapter is mainly written for system administrators. It covers download, install/uninstall, data import/export, system monitoring, user management, connection management, capacity planning and system optimization.
diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
index 09e40b956f..b138d69bfc 100644
--- a/docs/en/14-reference/02-rest-api/02-rest-api.mdx
+++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
@@ -1,5 +1,6 @@
---
title: REST API
+description: This document describes the TDengine REST API.
---
To support the development of various types of applications and platforms, TDengine provides an API that conforms to REST principles: the REST API. To minimize the learning cost, and unlike the REST APIs of other database engines, TDengine allows SQL commands to be placed in the body of an HTTP POST request to operate the database.
diff --git a/docs/en/14-reference/03-connector/03-cpp.mdx b/docs/en/14-reference/03-connector/03-cpp.mdx
index 906d56ab15..3bd7b7f4c6 100644
--- a/docs/en/14-reference/03-connector/03-cpp.mdx
+++ b/docs/en/14-reference/03-connector/03-cpp.mdx
@@ -1,6 +1,7 @@
---
-sidebar_label: C/C++
title: C/C++ Connector
+sidebar_label: C/C++
+description: This document describes the TDengine C/C++ connector.
---
C/C++ developers can use TDengine's client driver and the C/C++ connector to develop applications that connect to TDengine clusters for data writing, querying, and other functions. To use the C/C++ connector, you must include the TDengine header file _taos.h_, which lists the function prototypes of the provided APIs. The application also needs to link to the corresponding dynamic libraries on the platform where it is located.
diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx
index c37738b3f8..61ce166069 100644
--- a/docs/en/14-reference/03-connector/04-java.mdx
+++ b/docs/en/14-reference/03-connector/04-java.mdx
@@ -1,8 +1,8 @@
---
-toc_max_heading_level: 4
-sidebar_label: Java
title: TDengine Java Connector
-description: The TDengine Java Connector is implemented on the standard JDBC API and provides native and REST connectors.
+sidebar_label: Java
+description: This document describes the TDengine Java Connector.
+toc_max_heading_level: 4
---
import Tabs from '@theme/Tabs';
diff --git a/docs/en/14-reference/03-connector/05-go.mdx b/docs/en/14-reference/03-connector/05-go.mdx
index 60407c0735..da2f54708f 100644
--- a/docs/en/14-reference/03-connector/05-go.mdx
+++ b/docs/en/14-reference/03-connector/05-go.mdx
@@ -1,7 +1,8 @@
---
-toc_max_heading_level: 4
-sidebar_label: Go
title: TDengine Go Connector
+sidebar_label: Go
+description: This document describes the TDengine Go connector.
+toc_max_heading_level: 4
---
import Tabs from '@theme/Tabs';
diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx
index 4e2a7848dc..6e84859610 100644
--- a/docs/en/14-reference/03-connector/06-rust.mdx
+++ b/docs/en/14-reference/03-connector/06-rust.mdx
@@ -1,7 +1,8 @@
---
-toc_max_heading_level: 4
-sidebar_label: Rust
title: TDengine Rust Connector
+sidebar_label: Rust
+description: This document describes the TDengine Rust connector.
+toc_max_heading_level: 4
---
import Tabs from '@theme/Tabs';
diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx
index d593c3f133..146da268a8 100644
--- a/docs/en/14-reference/03-connector/07-python.mdx
+++ b/docs/en/14-reference/03-connector/07-python.mdx
@@ -1,7 +1,7 @@
---
-sidebar_label: Python
title: TDengine Python Connector
-description: "taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. tasopy wraps both the native and REST interfaces of TDengine, corresponding to the two submodules of tasopy: taos and taosrest. In addition to wrapping the native and REST interfaces, taospy also provides a programming interface that conforms to the Python Data Access Specification (PEP 249), making it easy to integrate taospy with many third-party tools, such as SQLAlchemy and pandas."
+sidebar_label: Python
+description: This document describes taospy, the TDengine Python connector.
---
import Tabs from "@theme/Tabs";
diff --git a/docs/en/14-reference/03-connector/08-node.mdx b/docs/en/14-reference/03-connector/08-node.mdx
index a36cf0efc9..1ac8f69d06 100644
--- a/docs/en/14-reference/03-connector/08-node.mdx
+++ b/docs/en/14-reference/03-connector/08-node.mdx
@@ -1,7 +1,8 @@
---
-toc_max_heading_level: 4
-sidebar_label: Node.js
title: TDengine Node.js Connector
+sidebar_label: Node.js
+description: This document describes the TDengine Node.js connector.
+toc_max_heading_level: 4
---
import Tabs from "@theme/Tabs";
diff --git a/docs/en/14-reference/03-connector/09-csharp.mdx b/docs/en/14-reference/03-connector/09-csharp.mdx
index 756e948bd2..e984967fb7 100644
--- a/docs/en/14-reference/03-connector/09-csharp.mdx
+++ b/docs/en/14-reference/03-connector/09-csharp.mdx
@@ -1,7 +1,8 @@
---
-toc_max_heading_level: 4
-sidebar_label: C#
title: C# Connector
+sidebar_label: C#
+description: This document describes the TDengine C# connector.
+toc_max_heading_level: 4
---
import Tabs from '@theme/Tabs';
diff --git a/docs/en/14-reference/03-connector/10-php.mdx b/docs/en/14-reference/03-connector/10-php.mdx
index 87f8616f9e..fd00d11239 100644
--- a/docs/en/14-reference/03-connector/10-php.mdx
+++ b/docs/en/14-reference/03-connector/10-php.mdx
@@ -1,6 +1,7 @@
---
-sidebar_label: PHP
title: PHP Connector
+sidebar_label: PHP
+description: This document describes the TDengine PHP connector.
---
`php-tdengine` is the TDengine PHP connector provided by TDengine community. In particular, it supports Swoole coroutine.
diff --git a/docs/en/14-reference/03-connector/index.mdx b/docs/en/14-reference/03-connector/index.mdx
index da3aae8309..2df73b8592 100644
--- a/docs/en/14-reference/03-connector/index.mdx
+++ b/docs/en/14-reference/03-connector/index.mdx
@@ -1,5 +1,6 @@
---
title: Connector
+description: This document describes the connectors that TDengine provides to interface with various programming languages.
---
TDengine provides a rich set of APIs (application programming interfaces). To help users develop their applications quickly, TDengine supports connectors for multiple programming languages, including official connectors for C/C++, Java, Python, Go, Node.js, C#, and Rust. These connectors support connecting to TDengine clusters using both native interfaces (taosc) and REST interfaces (not yet supported in a few languages). Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
diff --git a/docs/en/14-reference/04-taosadapter.md b/docs/en/14-reference/04-taosadapter.md
index 9eb6cb9213..c1ec97b647 100644
--- a/docs/en/14-reference/04-taosadapter.md
+++ b/docs/en/14-reference/04-taosadapter.md
@@ -1,7 +1,7 @@
---
-title: "taosAdapter"
-description: "taosAdapter is a TDengine companion tool that acts as a bridge and adapter between TDengine clusters and applications. It provides an easy-to-use and efficient way to ingest data directly from data collection agent software such as Telegraf, StatsD, collectd, etc. It also provides an InfluxDB/OpenTSDB compatible data ingestion interface, allowing InfluxDB/OpenTSDB applications to be seamlessly ported to TDengine."
-sidebar_label: "taosAdapter"
+title: taosAdapter
+sidebar_label: taosAdapter
+description: This document describes how to use taosAdapter, a TDengine companion tool that acts as a bridge and adapter between TDengine clusters and applications.
---
import Prometheus from "./_prometheus.mdx"
diff --git a/docs/en/14-reference/05-taosbenchmark.md b/docs/en/14-reference/05-taosbenchmark.md
index 4017b12be9..9828d71ece 100644
--- a/docs/en/14-reference/05-taosbenchmark.md
+++ b/docs/en/14-reference/05-taosbenchmark.md
@@ -1,8 +1,8 @@
---
title: taosBenchmark
sidebar_label: taosBenchmark
+description: This document describes how to use taosBenchmark, a tool for testing the performance of TDengine.
toc_max_heading_level: 4
-description: "taosBenchmark (once called taosdemo ) is a tool for testing the performance of TDengine."
---
# Introduction
diff --git a/docs/en/14-reference/06-taosdump.md b/docs/en/14-reference/06-taosdump.md
index 9c63b4dc03..dcfc068a3d 100644
--- a/docs/en/14-reference/06-taosdump.md
+++ b/docs/en/14-reference/06-taosdump.md
@@ -1,6 +1,6 @@
---
title: taosdump
-description: "taosdump is a tool that supports backing up data from a running TDengine cluster and restoring the backed up data to the same, or another running TDengine cluster."
+description: This document describes how to use taosdump, a tool for backing up and restoring the data in a TDengine cluster.
---
## Introduction
diff --git a/docs/en/14-reference/07-tdinsight/index.md b/docs/en/14-reference/07-tdinsight/index.md
index d03c16a8bc..1c58dd6a76 100644
--- a/docs/en/14-reference/07-tdinsight/index.md
+++ b/docs/en/14-reference/07-tdinsight/index.md
@@ -1,6 +1,7 @@
---
title: TDinsight - Grafana-based Zero-Dependency Monitoring Solution for TDengine
sidebar_label: TDinsight
+description: This document describes TDinsight, a monitoring solution for TDengine.
---
TDinsight is a solution for monitoring TDengine using the built-in native monitoring database and [Grafana].
diff --git a/docs/en/14-reference/08-taos-shell.md b/docs/en/14-reference/08-taos-shell.md
index 68e2f08765..7833ac861f 100644
--- a/docs/en/14-reference/08-taos-shell.md
+++ b/docs/en/14-reference/08-taos-shell.md
@@ -1,7 +1,7 @@
---
title: TDengine Command Line Interface (CLI)
sidebar_label: Command Line Interface
-description: Instructions and tips for using the TDengine CLI
+description: This document describes how to use the TDengine CLI.
---
The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is the simplest way for users to manipulate and interact with TDengine instances.
diff --git a/docs/en/14-reference/09-support-platform/index.md b/docs/en/14-reference/09-support-platform/index.md
index 061294f016..7dfa8ac93a 100644
--- a/docs/en/14-reference/09-support-platform/index.md
+++ b/docs/en/14-reference/09-support-platform/index.md
@@ -1,6 +1,6 @@
---
title: List of supported platforms
-description: "List of platforms supported by TDengine server, client, and connector"
+description: This document describes the supported platforms for the TDengine server, client, and connectors.
---
## List of supported platforms for TDengine server
diff --git a/docs/en/14-reference/11-docker/index.md b/docs/en/14-reference/11-docker/index.md
index 89987c456f..b9278c6961 100644
--- a/docs/en/14-reference/11-docker/index.md
+++ b/docs/en/14-reference/11-docker/index.md
@@ -1,6 +1,6 @@
---
title: Deploying TDengine with Docker
-description: "This chapter focuses on starting the TDengine service in a container and accessing it."
+description: This chapter describes how to start and access TDengine in a Docker container.
---
This chapter describes how to start the TDengine service in a container and access it. Users can control the behavior of the service in the container by using environment variables on the docker run command-line or in the docker-compose file.
diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md
index 9e56a0b0bf..afa9f5a8ae 100644
--- a/docs/en/14-reference/12-config/index.md
+++ b/docs/en/14-reference/12-config/index.md
@@ -1,6 +1,6 @@
---
title: Configuration Parameters
-description: "Configuration parameters for client and server in TDengine"
+description: This document describes the configuration parameters for the TDengine server and client.
---
## Configuration File on Server Side
@@ -162,11 +162,7 @@ The parameters described in this document by the effect that they have on the sy
| Meaning | Execution policy for query statements |
| Unit | None |
| Default | 1 |
-| Value Range | 1: Run queries on vnodes and not on qnodes
-
-2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes.
-
-3: Only run scan operators on vnodes; run all other operators on qnodes. |
+| Value Range | 1: Run queries on vnodes and not on qnodes; 2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes; 3: Only run scan operators on vnodes, and run all other operators on qnodes. |
### querySmaOptimize
@@ -176,11 +172,7 @@ The parameters described in this document by the effect that they have on the sy
| Meaning | SMA index optimization policy |
| Unit | None |
| Default Value | 0 |
-| Notes |
-
-0: Disable SMA indexing and perform all queries on non-indexed data.
-
-1: Enable SMA indexing and perform queries from suitable statements on precomputation results.|
+| Notes |0: Disable SMA indexing and perform all queries on non-indexed data; 1: Enable SMA indexing and perform queries from suitable statements on precomputation results.|
### countAlwaysReturnValue
diff --git a/docs/en/14-reference/12-directory.md b/docs/en/14-reference/12-directory.md
index 19b036418f..651892c8b2 100644
--- a/docs/en/14-reference/12-directory.md
+++ b/docs/en/14-reference/12-directory.md
@@ -1,6 +1,6 @@
---
title: File directory structure
-description: "TDengine installation directory description"
+description: This document describes the structure of the TDengine directory after installation.
---
After TDengine is installed, the following directories or files will be created in the system by default.
diff --git a/docs/en/14-reference/13-schemaless/13-schemaless.md b/docs/en/14-reference/13-schemaless/13-schemaless.md
index a97a54af02..caedd76df8 100644
--- a/docs/en/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/en/14-reference/13-schemaless/13-schemaless.md
@@ -1,6 +1,6 @@
---
title: Schemaless Writing
-description: 'The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface.'
+description: This document describes how to use the schemaless write component of TDengine.
---
In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing
diff --git a/docs/en/14-reference/14-taosKeeper.md b/docs/en/14-reference/14-taosKeeper.md
index 665bc75380..0b4a1fbd60 100644
--- a/docs/en/14-reference/14-taosKeeper.md
+++ b/docs/en/14-reference/14-taosKeeper.md
@@ -1,7 +1,7 @@
---
sidebar_label: taosKeeper
title: taosKeeper
-description: exports TDengine monitoring metrics.
+description: This document describes how to use taosKeeper, a tool for exporting TDengine monitoring metrics.
---
## Introduction
diff --git a/docs/en/14-reference/index.md b/docs/en/14-reference/index.md
index f3a64913d0..bc8ec69965 100644
--- a/docs/en/14-reference/index.md
+++ b/docs/en/14-reference/index.md
@@ -1,5 +1,6 @@
---
title: Reference
+description: This document describes TDengine connectors and utilities.
---
This section describes the TDengine connectors and utilities.
diff --git a/docs/en/20-third-party/01-grafana.mdx b/docs/en/20-third-party/01-grafana.mdx
index ca32ce8afc..b33e1c1199 100644
--- a/docs/en/20-third-party/01-grafana.mdx
+++ b/docs/en/20-third-party/01-grafana.mdx
@@ -1,6 +1,7 @@
---
-sidebar_label: Grafana
title: Grafana
+sidebar_label: Grafana
+description: This document describes how to integrate TDengine with Grafana.
---
import Tabs from "@theme/Tabs";
diff --git a/docs/en/20-third-party/02-prometheus.md b/docs/en/20-third-party/02-prometheus.md
index ef9b9cb637..bfdd3d015e 100644
--- a/docs/en/20-third-party/02-prometheus.md
+++ b/docs/en/20-third-party/02-prometheus.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Prometheus
title: Prometheus writing and reading
+sidebar_label: Prometheus
+description: This document describes how to integrate TDengine with Prometheus.
---
import Prometheus from "../14-reference/_prometheus.mdx"
diff --git a/docs/en/20-third-party/03-telegraf.md b/docs/en/20-third-party/03-telegraf.md
index 8f3cab0e57..7e99b84eab 100644
--- a/docs/en/20-third-party/03-telegraf.md
+++ b/docs/en/20-third-party/03-telegraf.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Telegraf
title: Telegraf writing
+sidebar_label: Telegraf
+description: This document describes how to integrate TDengine with Telegraf.
---
import Telegraf from "../14-reference/_telegraf.mdx"
diff --git a/docs/en/20-third-party/05-collectd.md b/docs/en/20-third-party/05-collectd.md
index 5b52e3b7bc..d8c8e7f81d 100644
--- a/docs/en/20-third-party/05-collectd.md
+++ b/docs/en/20-third-party/05-collectd.md
@@ -1,6 +1,7 @@
---
-sidebar_label: collectd
title: collectd writing
+sidebar_label: collectd
+description: This document describes how to integrate TDengine with collectd.
---
import CollectD from "../14-reference/_collectd.mdx"
diff --git a/docs/en/20-third-party/06-statsd.md b/docs/en/20-third-party/06-statsd.md
index b861a48ecd..ea428e9cdb 100644
--- a/docs/en/20-third-party/06-statsd.md
+++ b/docs/en/20-third-party/06-statsd.md
@@ -1,6 +1,7 @@
---
-sidebar_label: StatsD
title: StatsD Writing
+sidebar_label: StatsD
+description: This document describes how to integrate TDengine with StatsD.
---
import StatsD from "../14-reference/_statsd.mdx"
diff --git a/docs/en/20-third-party/07-icinga2.md b/docs/en/20-third-party/07-icinga2.md
index 167b6a4303..540aae8689 100644
--- a/docs/en/20-third-party/07-icinga2.md
+++ b/docs/en/20-third-party/07-icinga2.md
@@ -1,6 +1,7 @@
---
-sidebar_label: icinga2
title: icinga2 writing
+sidebar_label: icinga2
+description: This document describes how to integrate TDengine with icinga2.
---
import Icinga2 from "../14-reference/_icinga2.mdx"
diff --git a/docs/en/20-third-party/08-tcollector.md b/docs/en/20-third-party/08-tcollector.md
index b604a2d712..f1c0ecd44d 100644
--- a/docs/en/20-third-party/08-tcollector.md
+++ b/docs/en/20-third-party/08-tcollector.md
@@ -1,6 +1,7 @@
---
-sidebar_label: TCollector
title: TCollector writing
+sidebar_label: TCollector
+description: This document describes how to integrate TDengine with TCollector.
---
import TCollector from "../14-reference/_tcollector.mdx"
diff --git a/docs/en/20-third-party/09-emq-broker.md b/docs/en/20-third-party/09-emq-broker.md
index 2ead1bbaf4..10ce0174ed 100644
--- a/docs/en/20-third-party/09-emq-broker.md
+++ b/docs/en/20-third-party/09-emq-broker.md
@@ -1,6 +1,7 @@
---
-sidebar_label: EMQX Broker
title: EMQX Broker writing
+sidebar_label: EMQX Broker
+description: This document describes how to integrate TDengine with the EMQX broker.
---
MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emqx) is an open-source MQTT broker. You can write MQTT data directly to TDengine without any code; you only need to set up "rules" in the EMQX Dashboard to create a simple configuration. EMQX supports saving data to TDengine by sending data to a web service, and its Enterprise Edition provides a native TDengine driver for direct saving. Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use it.
diff --git a/docs/en/20-third-party/10-hive-mq-broker.md b/docs/en/20-third-party/10-hive-mq-broker.md
index 828a62ac5b..6c5165596e 100644
--- a/docs/en/20-third-party/10-hive-mq-broker.md
+++ b/docs/en/20-third-party/10-hive-mq-broker.md
@@ -1,6 +1,7 @@
---
-sidebar_label: HiveMQ Broker
title: HiveMQ Broker Writing
+sidebar_label: HiveMQ Broker
+description: This document describes how to integrate TDengine with the HiveMQ broker.
---
[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides community and enterprise editions. HiveMQ is mainly used for enterprise and emerging machine-to-machine (M2M) communication and internal transport, meeting requirements for scalability, ease of management, and security. HiveMQ provides an open-source plug-in development kit. MQTT data can be saved to TDengine via the TDengine extension for HiveMQ. For more information, see [HiveMQ TDengine Extension](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README_EN.md).
diff --git a/docs/en/20-third-party/11-kafka.md b/docs/en/20-third-party/11-kafka.md
index 3e8f7c295d..0998862b2d 100644
--- a/docs/en/20-third-party/11-kafka.md
+++ b/docs/en/20-third-party/11-kafka.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Kafka
title: TDengine Kafka Connector Tutorial
+sidebar_label: Kafka
+description: This document describes how to integrate TDengine with Kafka.
---
TDengine Kafka Connector contains two plugins: TDengine Source Connector and TDengine Sink Connector. Users only need to provide a simple configuration file to synchronize the data of the specified topic in Kafka (batch or real-time) to TDengine or synchronize the data (batch or real-time) of the specified database in TDengine to Kafka.
diff --git a/docs/en/20-third-party/12-google-data-studio.md b/docs/en/20-third-party/12-google-data-studio.md
index fc94f98056..ea6431fa5a 100644
--- a/docs/en/20-third-party/12-google-data-studio.md
+++ b/docs/en/20-third-party/12-google-data-studio.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Google Data Studio
title: Use Google Data Studio to access TDengine
+sidebar_label: Google Data Studio
+description: This document describes how to integrate TDengine with Google Data Studio.
---
Data Studio is a powerful tool for reporting and visualization, offering a wide variety of charts and connectors and making it easy to generate reports based on predefined templates. Its ease of use and robust ecosystem have made it one of the first choices for people working in data analysis.
diff --git a/docs/en/20-third-party/13-Jupyter.md b/docs/en/20-third-party/13-Jupyter.md
index fbd7e530f0..1ac9df1da4 100644
--- a/docs/en/20-third-party/13-Jupyter.md
+++ b/docs/en/20-third-party/13-Jupyter.md
@@ -1,6 +1,7 @@
---
-sidebar_label: JupyterLab
title: Connect JupyterLab to TDengine
+sidebar_label: JupyterLab
+description: This document describes how to integrate TDengine with JupyterLab.
---
JupyterLab is the next generation of the ubiquitous Jupyter Notebook. In this note we show you how to install the TDengine Python connector to connect to TDengine in JupyterLab. You can then insert data and perform queries against the TDengine instance within JupyterLab.
diff --git a/docs/en/20-third-party/index.md b/docs/en/20-third-party/index.md
index 87bd9e0751..6fc8043eef 100644
--- a/docs/en/20-third-party/index.md
+++ b/docs/en/20-third-party/index.md
@@ -1,5 +1,6 @@
---
title: Third Party Tools
+description: This document describes how to integrate TDengine with various third-party tools.
---
Since TDengine supports standard SQL commands, common database connector standards (e.g., JDBC), ORMs, and other popular time-series database writing protocols (e.g., InfluxDB Line Protocol, OpenTSDB JSON, OpenTSDB Telnet, etc.), it is very easy to integrate TDengine with third-party tools. You only need to provide simple configuration, and the integration can be done without a line of code.
diff --git a/docs/en/21-tdinternal/01-arch.md b/docs/en/21-tdinternal/01-arch.md
index 697ecb98a1..cef05dcc56 100644
--- a/docs/en/21-tdinternal/01-arch.md
+++ b/docs/en/21-tdinternal/01-arch.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Architecture
title: Architecture
+sidebar_label: Architecture
+description: This document describes the architecture of TDengine.
---
## Cluster and Primary Logic Unit
diff --git a/docs/en/21-tdinternal/03-high-availability.md b/docs/en/21-tdinternal/03-high-availability.md
index e2e1c6521e..a0f6ca4ffe 100644
--- a/docs/en/21-tdinternal/03-high-availability.md
+++ b/docs/en/21-tdinternal/03-high-availability.md
@@ -1,6 +1,7 @@
---
-sidebar_label: High Availability
title: High Availability
+sidebar_label: High Availability
+description: This document describes how TDengine implements high availability.
---
## High Availability of Vnode
diff --git a/docs/en/21-tdinternal/04-load-balance.md b/docs/en/21-tdinternal/04-load-balance.md
index 7648398059..73da1c1dd6 100644
--- a/docs/en/21-tdinternal/04-load-balance.md
+++ b/docs/en/21-tdinternal/04-load-balance.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Load Balance
title: Load Balance
+sidebar_label: Load Balance
+description: This document describes how TDengine implements load balancing.
---
Load balancing in TDengine is mainly about distributing time-series data. TDengine employs a built-in hash algorithm to distribute all the tables and subtables of a database, together with their data, across all the vgroups that belong to the database. Each table or subtable can be handled only by a single vgroup, while each vgroup can process multiple tables or subtables.
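TDengine's hash algorithm is internal, but the idea of deterministically pinning each table or subtable to exactly one vgroup can be sketched as follows; this is a conceptual illustration only, not the actual function.

```python
# Conceptual illustration only: deterministically map each (sub)table to
# exactly one vgroup. TDengine's real built-in hash is internal and will
# differ from this sketch.
import hashlib

def vgroup_of(table_name: str, num_vgroups: int) -> int:
    digest = hashlib.md5(table_name.encode("utf-8")).digest()
    return int.from_bytes(digest[:4], "little") % num_vgroups

for t in (f"d{i}" for i in range(8)):
    # each table lands on a single vgroup; a vgroup serves many tables
    print(t, "-> vgroup", vgroup_of(t, num_vgroups=4))
```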
diff --git a/docs/en/21-tdinternal/index.md b/docs/en/21-tdinternal/index.md
index 999d6f89ff..4f08467543 100644
--- a/docs/en/21-tdinternal/index.md
+++ b/docs/en/21-tdinternal/index.md
@@ -1,5 +1,6 @@
---
title: TDengine Inside
+description: This document describes the internals of TDengine from an architectural perspective.
---
```mdx-code-block
diff --git a/docs/en/25-application/01-telegraf.md b/docs/en/25-application/01-telegraf.md
index 65fb08ee67..e043aebcb6 100644
--- a/docs/en/25-application/01-telegraf.md
+++ b/docs/en/25-application/01-telegraf.md
@@ -1,6 +1,7 @@
---
-sidebar_label: TDengine + Telegraf + Grafana
title: Quickly Build IT DevOps Visualization System with TDengine + Telegraf + Grafana
+sidebar_label: TDengine + Telegraf + Grafana
+description: This document describes how to create an IT visualization system by integrating TDengine with Telegraf and Grafana.
---
## Background
diff --git a/docs/en/25-application/02-collectd.md b/docs/en/25-application/02-collectd.md
index 97412b2309..6ac7253fc4 100644
--- a/docs/en/25-application/02-collectd.md
+++ b/docs/en/25-application/02-collectd.md
@@ -1,6 +1,7 @@
---
-sidebar_label: TDengine + collectd/StatsD + Grafana
title: Quickly build an IT DevOps visualization system using TDengine + collectd/StatsD + Grafana
+sidebar_label: TDengine + collectd/StatsD + Grafana
+description: This document describes how to build an IT visualization system by integrating TDengine with Grafana and collectd or StatsD.
---
## Background
diff --git a/docs/en/25-application/03-immigrate.md b/docs/en/25-application/03-immigrate.md
index 1aabaa43e7..30d069e4e2 100644
--- a/docs/en/25-application/03-immigrate.md
+++ b/docs/en/25-application/03-immigrate.md
@@ -1,6 +1,7 @@
---
-sidebar_label: OpenTSDB Migration to TDengine
title: Best Practices for Migrating OpenTSDB Applications to TDengine
+sidebar_label: OpenTSDB Migration to TDengine
+description: This document describes the best practices for migrating an OpenTSDB application to TDengine.
---
As a distributed, scalable time-series database platform based on HBase, and thanks to its first-mover advantage, OpenTSDB is widely used for monitoring in DevOps. However, as new technologies like cloud computing, microservices, and containerization have developed rapidly, enterprise-level services are becoming more and more diverse and their architectures are becoming more complex.
diff --git a/docs/en/25-application/index.md b/docs/en/25-application/index.md
index 5383a00c67..178fef47b6 100644
--- a/docs/en/25-application/index.md
+++ b/docs/en/25-application/index.md
@@ -1,5 +1,6 @@
---
title: Application Practice
+description: This document describes some examples of building systems around TDengine.
---
```mdx-code-block
diff --git a/docs/en/27-train-faq/01-faq.md b/docs/en/27-train-faq/01-faq.md
index 7650e97365..9e7f4f8e0d 100644
--- a/docs/en/27-train-faq/01-faq.md
+++ b/docs/en/27-train-faq/01-faq.md
@@ -1,5 +1,6 @@
---
title: Frequently Asked Questions
+description: This document describes the frequently asked questions about TDengine.
---
## Submit an Issue
diff --git a/docs/en/27-train-faq/index.md b/docs/en/27-train-faq/index.md
index 2cb87aab00..cc55a41559 100644
--- a/docs/en/27-train-faq/index.md
+++ b/docs/en/27-train-faq/index.md
@@ -1,5 +1,6 @@
---
title: FAQ & Others
+description: This document describes common issues related to TDengine.
---
```mdx-code-block
diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md
index 83ea3eb5e6..a6a28e04c4 100644
--- a/docs/en/28-releases/01-tdengine.md
+++ b/docs/en/28-releases/01-tdengine.md
@@ -1,7 +1,7 @@
---
-sidebar_label: TDengine
title: TDengine Release History and Download Links
-description: TDengine release history, Release Notes and download links.
+sidebar_label: TDengine
+description: This document provides download links for all released versions of TDengine 3.0.
---
TDengine 3.x installation packages can be downloaded at the following links:
diff --git a/docs/en/28-releases/02-tools.md b/docs/en/28-releases/02-tools.md
index 97fed654f2..3c1dc32f8d 100644
--- a/docs/en/28-releases/02-tools.md
+++ b/docs/en/28-releases/02-tools.md
@@ -1,7 +1,7 @@
---
-sidebar_label: taosTools
title: taosTools Release History and Download Links
-description: taosTools release history, Release Notes, download links.
+sidebar_label: taosTools
+description: This document provides download links for all released versions of taosTools compatible with TDengine 3.0.
---
taosTools installation packages can be downloaded at the following links:
diff --git a/docs/en/28-releases/index.md b/docs/en/28-releases/index.md
index c01c99cdce..d1f93c3b9b 100644
--- a/docs/en/28-releases/index.md
+++ b/docs/en/28-releases/index.md
@@ -1,5 +1,6 @@
---
title: Releases
+description: This document describes TDengine products that have been released.
---
```mdx-code-block
diff --git a/docs/examples/csharp/wsConnect/wsConnect.csproj b/docs/examples/csharp/wsConnect/wsConnect.csproj
index 6d78be6e7a..c7988b6e9c 100644
--- a/docs/examples/csharp/wsConnect/wsConnect.csproj
+++ b/docs/examples/csharp/wsConnect/wsConnect.csproj
@@ -2,7 +2,7 @@
Exe
- net5.0
+ net6.0
diff --git a/docs/examples/csharp/wsInsert/wsInsert.csproj b/docs/examples/csharp/wsInsert/wsInsert.csproj
index 95bfbdea3d..5aa419b2c8 100644
--- a/docs/examples/csharp/wsInsert/wsInsert.csproj
+++ b/docs/examples/csharp/wsInsert/wsInsert.csproj
@@ -2,7 +2,7 @@
Exe
- net5.0
+ net6.0
enable
diff --git a/docs/examples/csharp/wsQuery/wsQuery.csproj b/docs/examples/csharp/wsQuery/wsQuery.csproj
index e5c2cf767c..bcc7c19a59 100644
--- a/docs/examples/csharp/wsQuery/wsQuery.csproj
+++ b/docs/examples/csharp/wsQuery/wsQuery.csproj
@@ -2,7 +2,7 @@
Exe
- net5.0
+ net6.0
enable
diff --git a/docs/examples/csharp/wsStmt/wsStmt.csproj b/docs/examples/csharp/wsStmt/wsStmt.csproj
index e5c2cf767c..bcc7c19a59 100644
--- a/docs/examples/csharp/wsStmt/wsStmt.csproj
+++ b/docs/examples/csharp/wsStmt/wsStmt.csproj
@@ -2,7 +2,7 @@
Exe
- net5.0
+ net6.0
enable
diff --git a/docs/examples/node/restexample/connect.js b/docs/examples/node/restexample/connect.js
index bb027d4fe8..f36472b4c0 100644
--- a/docs/examples/node/restexample/connect.js
+++ b/docs/examples/node/restexample/connect.js
@@ -3,18 +3,14 @@ const { options, connect } = require("@tdengine/rest");
async function test() {
options.path = "/rest/sql";
options.host = "localhost";
+ options.port = 6041;
let conn = connect(options);
let cursor = conn.cursor();
try {
let res = await cursor.query("SELECT server_version()");
- res.toString();
+ console.log("res.getResult()", res.getResult());
} catch (err) {
console.log(err);
}
}
test();
-
-// output:
-// server_version() |
-// ===================
-// 3.0.0.0 |
diff --git a/docs/examples/python/connection_usage_native_reference.py b/docs/examples/python/connection_usage_native_reference.py
index 8b754ec722..0a23c5f95b 100644
--- a/docs/examples/python/connection_usage_native_reference.py
+++ b/docs/examples/python/connection_usage_native_reference.py
@@ -42,4 +42,4 @@ print(data)
# ANCHOR_END: query
-conn.close()
+conn.close()
\ No newline at end of file
diff --git a/docs/examples/python/kafka_example.py b/docs/examples/python/kafka_example.py
index 43f9183f7e..5b81706ef7 100644
--- a/docs/examples/python/kafka_example.py
+++ b/docs/examples/python/kafka_example.py
@@ -106,8 +106,8 @@ class Consumer(object):
for task in self.tasks:
while not task.done():
pass
- if self.pool is not None:
- self.pool.shutdown()
+ if self.pool is not None:
+ self.pool.shutdown()
# clean data
if self.config.get('clean_after_testing'):
diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md
index fc35da8636..b0a84af6dc 100644
--- a/docs/zh/12-taos-sql/02-database.md
+++ b/docs/zh/12-taos-sql/02-database.md
@@ -142,7 +142,7 @@ SHOW DATABASES;
### Show the Create Statement of a Database
```
-SHOW CREATE DATABASE db_name;
+SHOW CREATE DATABASE db_name \G;
```
Commonly used for database migration. For an existing database, this statement returns its create statement; executing that statement in another cluster produces a database with identical settings.
@@ -150,7 +150,7 @@ SHOW CREATE DATABASE db_name;
### View Database Parameters
```sql
-SELECT * FROM INFORMATION_SCHEMA.INS_DATABASES WHERE NAME='DBNAME' \G;
+SELECT * FROM INFORMATION_SCHEMA.INS_DATABASES WHERE NAME='db_name' \G;
```
This lists the configuration parameters of the specified database, displaying one parameter per line.
@@ -177,4 +177,4 @@ REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3
BALANCE VGROUP
```
-Automatically adjusts the distribution of vnodes across all vgroups in the cluster, which is equivalent to load balancing the cluster's data at the vnode level.
\ No newline at end of file
+Automatically adjusts the distribution of vnodes across all vgroups in the cluster, which is equivalent to load balancing the cluster's data at the vnode level.
diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md
index 503f692764..28b409de30 100644
--- a/docs/zh/14-reference/12-config/index.md
+++ b/docs/zh/14-reference/12-config/index.md
@@ -162,11 +162,7 @@ taos --dump-config
| Meaning | Execution policy for query statements |
| Unit | None |
| Default | 1 |
-| Notes | 1: Use only vnodes and never qnodes |
-
-2: Subtasks without scan operators are executed on qnodes, and subtasks with scan operators are executed on vnodes
-
-3: vnodes run only scan operators, and all other operators are executed on qnodes |
+| Notes | 1: Use only vnodes and never qnodes; 2: Subtasks without scan operators are executed on qnodes, and subtasks with scan operators are executed on vnodes; 3: vnodes run only scan operators, and all other operators are executed on qnodes |
### querySmaOptimize
@@ -176,11 +172,7 @@ taos --dump-config
| Meaning | Optimization policy for sma indexes |
| Unit | None |
| Default | 0 |
-| Notes |
-
-0: Do not use sma indexes; always query from the raw data
-
-1: Use sma indexes; for qualifying statements, query directly from the precomputed results |
+| Notes | 0: Do not use sma indexes and always query from the raw data; 1: Use sma indexes and, for qualifying statements, query directly from the precomputed results |
### maxNumOfDistinctRes
@@ -389,7 +381,7 @@ The valid value of charset is UTF-8.
| Attribute | Description |
| -------- | -------------------------------------------- |
| Scope | Applies to both server and client |
-| Meaning | Stop writing logs when the disk size of the log folder is less than this value |
+| Meaning | Stop writing logs when the available space on the disk where the log folder resides is less than this value |
| Unit | GB |
| Default | 1.0 |
diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h
index 0878e267c3..558203052f 100644
--- a/include/libs/parser/parser.h
+++ b/include/libs/parser/parser.h
@@ -86,10 +86,11 @@ void qCleanupKeywordsTable();
int32_t qBuildStmtOutput(SQuery* pQuery, SHashObj* pVgHash, SHashObj* pBlockHash);
int32_t qResetStmtDataBlock(STableDataCxt* block, bool keepBuf);
int32_t qCloneStmtDataBlock(STableDataCxt** pDst, STableDataCxt* pSrc, bool reset);
-int32_t qRebuildStmtDataBlock(STableDataCxt** pDst, STableDataCxt* pSrc, uint64_t uid, uint64_t suid, int32_t vgId, bool rebuildCreateTb);
+int32_t qRebuildStmtDataBlock(STableDataCxt** pDst, STableDataCxt* pSrc, uint64_t uid, uint64_t suid, int32_t vgId,
+ bool rebuildCreateTb);
void qDestroyStmtDataBlock(STableDataCxt* pBlock);
STableMeta* qGetTableMetaInDataBlock(STableDataCxt* pDataBlock);
-int32_t qCloneCurrentTbData(STableDataCxt* pDataBlock, SSubmitTbData **pData);
+int32_t qCloneCurrentTbData(STableDataCxt* pDataBlock, SSubmitTbData** pData);
int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx);
int32_t qStmtParseQuerySql(SParseContext* pCxt, SQuery* pQuery);
@@ -104,17 +105,17 @@ void destroyBoundColumnInfo(void* pBoundInfo);
int32_t qCreateSName(SName* pName, const char* pTableName, int32_t acctId, char* dbName, char* msgBuf,
int32_t msgBufLen);
-void qDestroyBoundColInfo(void* pInfo);
+void qDestroyBoundColInfo(void* pInfo);
-SQuery* smlInitHandle();
-int32_t smlBuildRow(STableDataCxt* pTableCxt);
-int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void *kv, int32_t index);
+SQuery* smlInitHandle();
+int32_t smlBuildRow(STableDataCxt* pTableCxt);
+int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* kv, int32_t index);
STableDataCxt* smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta);
-int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols, STableMeta* pTableMeta,
- char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, char* msgBuf, int16_t msgBufLen);
+int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols,
+ STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
+ char* msgBuf, int16_t msgBufLen);
int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash);
-
int rawBlockBindData(SQuery *query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD *fields, int numFields, bool needChangeLength);
int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray);
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index 7f95ca3d72..dfdbaa6fdd 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -210,8 +210,8 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} || :
[ -x ${install_main_dir}/bin/${udfdName} ] && ${csudo}ln -s ${install_main_dir}/bin/${udfdName} ${bin_link_dir}/${udfdName} || :
[ -x ${install_main_dir}/bin/${adapterName} ] && ${csudo}ln -s ${install_main_dir}/bin/${adapterName} ${bin_link_dir}/${adapterName} || :
- [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || :
- [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || :
+ [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || :
+ [ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -sf ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || :
[ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || :
[ -x ${install_main_dir}/bin/${xname} ] && ${csudo}ln -s ${install_main_dir}/bin/${xname} ${bin_link_dir}/${xname} || :
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
@@ -787,7 +787,7 @@ function updateProduct() {
if echo $osinfo | grep -qwi "centos"; then
rpm -q tdengine 2>&1 > /dev/null && rpm_erase tdengine ||:
elif echo $osinfo | grep -qwi "ubuntu"; then
- dpkg -l tdengine 2>&1 > /dev/null && deb_erase tdengine ||:
+ dpkg -l tdengine 2>&1 | grep ii > /dev/null && deb_erase tdengine ||:
fi
tar -zxf ${tarName}
diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c
index f5b65371a7..3ed157efef 100644
--- a/source/client/src/clientStmt.c
+++ b/source/client/src/clientStmt.c
@@ -393,7 +393,7 @@ int32_t stmtGetFromCache(STscStmt* pStmt) {
if (NULL == pStmt->sql.pTableCache || taosHashGetSize(pStmt->sql.pTableCache) <= 0) {
if (pStmt->bInfo.inExecCache) {
- if(ASSERT(taosHashGetSize(pStmt->exec.pBlockHash) == 1)){
+ if (ASSERT(taosHashGetSize(pStmt->exec.pBlockHash) == 1)) {
tscError("stmtGetFromCache error");
return TSDB_CODE_TSC_STMT_CACHE_ERROR;
}
diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c
index 01c99c6e9e..96f18e5fb6 100644
--- a/source/client/src/clientTmq.c
+++ b/source/client/src/clientTmq.c
@@ -1885,9 +1885,6 @@ int32_t tmq_consumer_close(tmq_t* tmq) {
}
tmq_list_destroy(lst);
-
- /*return rsp;*/
- return 0;
}
taosRemoveRef(tmqMgmt.rsetId, tmq->refId);
return 0;
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index 69f98c5e30..2d4c571d31 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -1431,6 +1431,7 @@ SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
pBlock->info.rows = 0;
pBlock->info.capacity = 0;
pBlock->info.rowSize = 0;
+ pBlock->info.id = pDataBlock->info.id;
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
index d441d3d187..7d11bc7082 100644
--- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
+++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c
@@ -167,6 +167,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_STB_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TTL_TABLE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_SMA_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TMQ_SUBSCRIBE_RSP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
index a0c1754e82..8049db9c78 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
@@ -79,8 +79,6 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal) {
char path[TSDB_FILENAME_LEN] = {0};
- vnodeProposeCommitOnNeed(pVnode->pImpl);
-
taosThreadRwlockWrlock(&pMgmt->lock);
taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t));
taosThreadRwlockUnlock(&pMgmt->lock);
diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c
index eeb4249217..eb4fc3cdad 100644
--- a/source/dnode/mnode/impl/src/mndConsumer.c
+++ b/source/dnode/mnode/impl/src/mndConsumer.c
@@ -839,10 +839,14 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer,
char *addedTopic = strdup(taosArrayGetP(pNewConsumer->rebNewTopics, 0));
// not exist in current topic
-#if 0
+
+ bool existing = false;
+#if 1
for (int32_t i = 0; i < taosArrayGetSize(pOldConsumer->currentTopics); i++) {
char *topic = taosArrayGetP(pOldConsumer->currentTopics, i);
- A(strcmp(topic, addedTopic) != 0);
+ if (strcmp(topic, addedTopic) == 0) {
+ existing = true;
+ }
}
#endif
@@ -857,8 +861,10 @@ static int32_t mndConsumerActionUpdate(SSdb *pSdb, SMqConsumerObj *pOldConsumer,
}
// add to current topic
- taosArrayPush(pOldConsumer->currentTopics, &addedTopic);
- taosArraySort(pOldConsumer->currentTopics, taosArrayCompareString);
+ if (!existing) {
+ taosArrayPush(pOldConsumer->currentTopics, &addedTopic);
+ taosArraySort(pOldConsumer->currentTopics, taosArrayCompareString);
+ }
// set status
if (taosArrayGetSize(pOldConsumer->rebNewTopics) == 0 && taosArrayGetSize(pOldConsumer->rebRemovedTopics) == 0) {
diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c
index 73a296ebe4..6dedeaf8f3 100644
--- a/source/dnode/mnode/impl/src/mndStb.c
+++ b/source/dnode/mnode/impl/src/mndStb.c
@@ -41,6 +41,7 @@ static int32_t mndProcessTtlTimer(SRpcMsg *pReq);
static int32_t mndProcessCreateStbReq(SRpcMsg *pReq);
static int32_t mndProcessAlterStbReq(SRpcMsg *pReq);
static int32_t mndProcessDropStbReq(SRpcMsg *pReq);
+static int32_t mndProcessDropTtltbReq(SRpcMsg *pReq);
static int32_t mndProcessTableMetaReq(SRpcMsg *pReq);
static int32_t mndRetrieveStb(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
@@ -65,6 +66,7 @@ int32_t mndInitStb(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_MND_ALTER_STB, mndProcessAlterStbReq);
mndSetMsgHandle(pMnode, TDMT_MND_DROP_STB, mndProcessDropStbReq);
mndSetMsgHandle(pMnode, TDMT_VND_CREATE_STB_RSP, mndTransProcessRsp);
+ mndSetMsgHandle(pMnode, TDMT_VND_DROP_TTL_TABLE_RSP, mndProcessDropTtltbReq);
mndSetMsgHandle(pMnode, TDMT_VND_ALTER_STB_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_VND_DROP_STB_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_MND_TABLE_META, mndProcessTableMetaReq);
@@ -2180,6 +2182,10 @@ static int32_t mndCheckDropStbForStream(SMnode *pMnode, const char *stbFullName,
return 0;
}
+static int32_t mndProcessDropTtltbReq(SRpcMsg *pRsp) {
+ return 0;
+}
+
static int32_t mndProcessDropStbReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c
index 860db20fa8..1d2f4da26b 100644
--- a/source/dnode/snode/src/snode.c
+++ b/source/dnode/snode/src/snode.c
@@ -93,6 +93,8 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) {
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle);
ASSERT(pTask->exec.executor);
+ streamSetupTrigger(pTask);
+
return 0;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index f9c2f0b3fa..fc0678e3e6 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -218,6 +218,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader);
static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader);
static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo);
static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter);
+static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order);
static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }
@@ -1192,9 +1193,9 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
int32_t unDumpedRows = asc ? pBlock->nRow - pDumpInfo->rowIndex : pDumpInfo->rowIndex + 1;
tsdbDebug("%p copy file block to sdatablock, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
- ", rows:%d, remain:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s",
+ ", rows:%d, remain:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", uid:%"PRIu64" elapsed time:%.2f ms, %s",
pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, dumpedRows,
- unDumpedRows, pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr);
+ unDumpedRows, pBlock->minVer, pBlock->maxVer, pBlockInfo->uid, elapsedTime, pReader->idStr);
return TSDB_CODE_SUCCESS;
}
@@ -2268,17 +2269,17 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea
if (code == TSDB_CODE_SUCCESS) {
pBlockScanInfo->iter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iter.iter) != NULL);
- tsdbDebug("%p uid:%" PRId64 ", check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
+ tsdbDebug("%p uid:%" PRIu64 ", check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
"-%" PRId64 " %s",
pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, d->minKey, d->maxKey, pReader->idStr);
} else {
- tsdbError("%p uid:%" PRId64 ", failed to create iterator for imem, code:%s, %s", pReader, pBlockScanInfo->uid,
+ tsdbError("%p uid:%" PRIu64 ", failed to create iterator for imem, code:%s, %s", pReader, pBlockScanInfo->uid,
tstrerror(code), pReader->idStr);
return code;
}
}
} else {
- tsdbDebug("%p uid:%" PRId64 ", no data in mem, %s", pReader, pBlockScanInfo->uid, pReader->idStr);
+ tsdbDebug("%p uid:%" PRIu64 ", no data in mem, %s", pReader, pBlockScanInfo->uid, pReader->idStr);
}
STbData* di = NULL;
@@ -2289,17 +2290,17 @@ static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbRea
if (code == TSDB_CODE_SUCCESS) {
pBlockScanInfo->iiter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iiter.iter) != NULL);
- tsdbDebug("%p uid:%" PRId64 ", check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
+ tsdbDebug("%p uid:%" PRIu64 ", check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
"-%" PRId64 " %s",
pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, di->minKey, di->maxKey, pReader->idStr);
} else {
- tsdbError("%p uid:%" PRId64 ", failed to create iterator for mem, code:%s, %s", pReader, pBlockScanInfo->uid,
+ tsdbError("%p uid:%" PRIu64 ", failed to create iterator for mem, code:%s, %s", pReader, pBlockScanInfo->uid,
tstrerror(code), pReader->idStr);
return code;
}
}
} else {
- tsdbDebug("%p uid:%" PRId64 ", no data in imem, %s", pReader, pBlockScanInfo->uid, pReader->idStr);
+ tsdbDebug("%p uid:%" PRIu64 ", no data in imem, %s", pReader, pBlockScanInfo->uid, pReader->idStr);
}
initDelSkylineIterator(pBlockScanInfo, pReader, d, di);
@@ -2609,6 +2610,14 @@ _end:
void setComposedBlockFlag(STsdbReader* pReader, bool composed) { pReader->status.composedDataBlock = composed; }
+int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order) {
+ if (pDelSkyline == NULL) {
+ return 0;
+ }
+
+ return ASCENDING_TRAVERSE(order) ? 0 : taosArrayGetSize(pDelSkyline) - 1;
+}
+
int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STbData* pMemTbData,
STbData* piMemTbData) {
if (pBlockScanInfo->delSkyline != NULL) {
@@ -2626,7 +2635,6 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader*
if (pIdx != NULL) {
code = tsdbReadDelData(pReader->pDelFReader, pIdx, pDelData);
}
-
if (code != TSDB_CODE_SUCCESS) {
goto _err;
}
@@ -2655,11 +2663,13 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader*
}
taosArrayDestroy(pDelData);
- pBlockScanInfo->iter.index =
- ASCENDING_TRAVERSE(pReader->order) ? 0 : taosArrayGetSize(pBlockScanInfo->delSkyline) - 1;
- pBlockScanInfo->iiter.index = pBlockScanInfo->iter.index;
- pBlockScanInfo->fileDelIndex = pBlockScanInfo->iter.index;
- pBlockScanInfo->lastBlockDelIndex = pBlockScanInfo->iter.index;
+ int32_t index = getInitialDelIndex(pBlockScanInfo->delSkyline, pReader->order);
+
+ pBlockScanInfo->iter.index = index;
+ pBlockScanInfo->iiter.index = index;
+ pBlockScanInfo->fileDelIndex = index;
+ pBlockScanInfo->lastBlockDelIndex = index;
+
return code;
_err:
@@ -2758,7 +2768,7 @@ static int32_t uidComparFunc(const void* p1, const void* p2) {
}
}
-static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus) {
+static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus, int32_t order) {
int32_t index = 0;
int32_t total = taosHashGetSize(pStatus->pTableMap);
@@ -2772,7 +2782,21 @@ static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SRea
taosSort(pOrderCheckInfo->tableUidList, total, sizeof(uint64_t), uidComparFunc);
}
-static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus) {
+// reset the last del file index
+static void resetScanBlockLastBlockDelIndex(SReaderStatus* pStatus, int32_t order) {
+ void* p = taosHashIterate(pStatus->pTableMap, NULL);
+ while (p != NULL) {
+ STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)p;
+
+ // reset the last del file index
+ pScanInfo->lastBlockDelIndex = getInitialDelIndex(pScanInfo->delSkyline, order);
+ p = taosHashIterate(pStatus->pTableMap, p);
+ }
+}
+
+static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, STsdbReader* pReader) {
+ SReaderStatus* pStatus = &pReader->status;
+
int32_t total = taosHashGetSize(pStatus->pTableMap);
if (total == 0) {
return TSDB_CODE_SUCCESS;
@@ -2785,7 +2809,7 @@ static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderSt
return TSDB_CODE_OUT_OF_MEMORY;
}
- extractOrderedTableUidList(pOrderCheckInfo, pStatus);
+ extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order);
uint64_t uid = pOrderCheckInfo->tableUidList[0];
pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
} else {
@@ -2802,7 +2826,7 @@ static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderSt
}
pOrderCheckInfo->tableUidList = p;
- extractOrderedTableUidList(pOrderCheckInfo, pStatus);
+ extractOrderedTableUidList(pOrderCheckInfo, pStatus, pReader->order);
uid = pOrderCheckInfo->tableUidList[0];
pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
@@ -2822,11 +2846,7 @@ static bool moveToNextTable(SUidOrderCheckInfo* pOrderedCheckInfo, SReaderStatus
uint64_t uid = pOrderedCheckInfo->tableUidList[pOrderedCheckInfo->currentIndex];
pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
- if (pStatus->pTableIter == NULL) {
- return false;
- }
-
- return true;
+ return (pStatus->pTableIter != NULL);
}
static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
@@ -2834,7 +2854,7 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
SLastBlockReader* pLastBlockReader = pStatus->fileIter.pLastBlockReader;
SUidOrderCheckInfo* pOrderedCheckInfo = &pStatus->uidCheckInfo;
- int32_t code = initOrderCheckInfo(pOrderedCheckInfo, pStatus);
+ int32_t code = initOrderCheckInfo(pOrderedCheckInfo, pReader);
if (code != TSDB_CODE_SUCCESS || (taosHashGetSize(pStatus->pTableMap) == 0)) {
return code;
}
@@ -2899,6 +2919,8 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader;
+ ASSERT(pBlockInfo != NULL);
+
if (pBlockInfo != NULL) {
pScanInfo =
*(STableBlockScanInfo**)taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
@@ -2919,7 +2941,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
TSDBKEY keyInBuf = getCurrentKeyInBuf(pScanInfo, pReader);
- if (pBlockInfo == NULL) { // build data block from last data file
+ /*if (pBlockInfo == NULL) { // build data block from last data file
SBlockData* pBData = &pReader->status.fileBlockData;
tBlockDataReset(pBData);
@@ -2951,7 +2973,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
pResBlock->info.rows, el, pReader->idStr);
}
- } else if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) {
+ } else*/ if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) {
code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData, pScanInfo->uid);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -3130,6 +3152,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
// this file does not have data files, let's start check the last block file if exists
if (pBlockIter->numOfBlocks == 0) {
+ resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order);
goto _begin;
}
}
@@ -3166,6 +3189,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
tBlockDataReset(pBlockData);
resetDataBlockIterator(pBlockIter, pReader->order);
+ resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order);
goto _begin;
} else {
code = initForFirstBlockInFile(pReader, pBlockIter);
@@ -3177,6 +3201,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
// this file does not have blocks, let's start check the last block file
if (pBlockIter->numOfBlocks == 0) {
+ resetScanBlockLastBlockDelIndex(&pReader->status, pReader->order);
goto _begin;
}
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
index c280e8c0e7..0e804bc65e 100644
--- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
+++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
@@ -1207,7 +1207,7 @@ static int32_t tsdbSnapWriteTableRow(STsdbSnapWriter* pWriter, TSDBROW* pRow) {
TSDB_CHECK_CODE(code, lino, _exit);
}
- tMapDataPutItem(&pWriter->pDIter->dIter.mDataBlk, &dataBlk, tPutDataBlk);
+ tMapDataPutItem(&pWriter->mDataBlk, &dataBlk, tPutDataBlk);
pWriter->pDIter->dIter.iDataBlk++;
} else {
code = tsdbReadDataBlockEx(pWriter->pDataFReader, &dataBlk, &pWriter->pDIter->dIter.bData);
@@ -1645,8 +1645,8 @@ _exit:
if (code) {
tsdbError("vgId:%d %s failed at line %d since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, lino, tstrerror(code));
} else {
- tsdbTrace("vgId:%d %s done, suid:%" PRId64 " uid:%" PRId64, TD_VID(pWriter->pTsdb->pVnode), __func__, pId->suid,
- pId->uid);
+ tsdbTrace("vgId:%d %s done, suid:%" PRId64 " uid:%" PRId64, TD_VID(pWriter->pTsdb->pVnode), __func__,
+ pWriter->tbid.suid, pWriter->tbid.uid);
}
return code;
}
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index 7afc9ef830..d939f7ea77 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -717,9 +717,10 @@ void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SG
SDiskbasedBuf* pBuf);
bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo);
+bool hasSlimitOffsetInfo(SLimitInfo* pLimitInfo);
void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo);
void resetLimitInfoForNextGroup(SLimitInfo* pLimitInfo);
-bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, SOperatorInfo* pOperator);
+bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
void applyAggFunctionOnPartialTuples(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData,
int32_t offset, int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput);
diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c
index 037b33dc9f..e5089ab4a9 100644
--- a/source/libs/executor/src/exchangeoperator.c
+++ b/source/libs/executor/src/exchangeoperator.c
@@ -218,10 +218,7 @@ static SSDataBlock* loadRemoteData(SOperatorInfo* pOperator) {
if (status == PROJECT_RETRIEVE_CONTINUE) {
continue;
} else if (status == PROJECT_RETRIEVE_DONE) {
- size_t rows = pBlock->info.rows;
- pExchangeInfo->limitInfo.numOfOutputRows += rows;
-
- if (rows == 0) {
+ if (pBlock->info.rows == 0) {
setOperatorCompleted(pOperator);
return NULL;
} else {
@@ -707,6 +704,8 @@ int32_t prepareLoadRemoteData(SOperatorInfo* pOperator) {
}
int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf) {
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+
if (pLimitInfo->remainGroupOffset > 0) {
if (pLimitInfo->currentGroupId == 0) { // it is the first group
pLimitInfo->currentGroupId = pBlock->info.id.groupId;
@@ -750,36 +749,20 @@ int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDa
// set current group id
pLimitInfo->currentGroupId = pBlock->info.id.groupId;
- if (pLimitInfo->remainOffset >= pBlock->info.rows) {
- pLimitInfo->remainOffset -= pBlock->info.rows;
- blockDataCleanup(pBlock);
+ bool limitReached = applyLimitOffset(pLimitInfo, pBlock, pTaskInfo);
+ if (pBlock->info.rows == 0) {
return PROJECT_RETRIEVE_CONTINUE;
- } else if (pLimitInfo->remainOffset < pBlock->info.rows && pLimitInfo->remainOffset > 0) {
- blockDataTrimFirstNRows(pBlock, pLimitInfo->remainOffset);
- pLimitInfo->remainOffset = 0;
- }
-
- // check for the limitation in each group
- if (pLimitInfo->limit.limit >= 0 && pLimitInfo->numOfOutputRows + pBlock->info.rows >= pLimitInfo->limit.limit) {
- int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows);
- blockDataKeepFirstNRows(pBlock, keepRows);
- if (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups) {
+ } else {
+ if (limitReached && (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
setOperatorCompleted(pOperator);
- } else {
- // current group limitation is reached, and future blocks of this group need to be discarded.
- if (pBlock->info.rows == 0) {
- return PROJECT_RETRIEVE_CONTINUE;
- }
+ return PROJECT_RETRIEVE_DONE;
}
-
- return PROJECT_RETRIEVE_DONE;
}
// todo optimize performance
// If there are slimit/soffset value exists, multi-round result can not be packed into one group, since the
// they may not belong to the same group the limit/offset value is not valid in this case.
- if ((!holdDataInBuf) || (pBlock->info.rows >= pOperator->resultInfo.threshold) || pLimitInfo->slimit.offset != -1 ||
- pLimitInfo->slimit.limit != -1) {
+ if ((!holdDataInBuf) || (pBlock->info.rows >= pOperator->resultInfo.threshold) || hasSlimitOffsetInfo(pLimitInfo)) {
return PROJECT_RETRIEVE_DONE;
} else { // not full enough, continue to accumulate the output data in the buffer.
return PROJECT_RETRIEVE_CONTINUE;
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index f8cce56dd4..540b5e90f8 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -1761,6 +1761,10 @@ bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo) {
pLimitInfo->slimit.offset != -1);
}
+bool hasSlimitOffsetInfo(SLimitInfo* pLimitInfo) {
+ return (pLimitInfo->slimit.limit != -1 || pLimitInfo->slimit.offset != -1);
+}
+
void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo) {
SLimit limit = {.limit = getLimit(pLimit), .offset = getOffset(pLimit)};
SLimit slimit = {.limit = getLimit(pSLimit), .offset = getOffset(pSLimit)};
diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c
index 6c2bcf086d..f84871ea92 100644
--- a/source/libs/executor/src/projectoperator.c
+++ b/source/libs/executor/src/projectoperator.c
@@ -90,7 +90,16 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
pInfo->binfo.pRes = pResBlock;
pInfo->pFinalRes = createOneDataBlock(pResBlock, false);
- pInfo->mergeDataBlocks = (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) ? false : pProjPhyNode->mergeDataBlock;
+
+ if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
+ pInfo->mergeDataBlocks = false;
+ } else {
+ if (!pProjPhyNode->ignoreGroupId) {
+ pInfo->mergeDataBlocks = false;
+ } else {
+ pInfo->mergeDataBlocks = pProjPhyNode->mergeDataBlock;
+ }
+ }
int32_t numOfRows = 4096;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
@@ -185,36 +194,15 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS
SOperatorInfo* pOperator) {
// set current group id
pLimitInfo->currentGroupId = groupId;
-
- if (pLimitInfo->remainOffset >= pBlock->info.rows) {
- pLimitInfo->remainOffset -= pBlock->info.rows;
- blockDataCleanup(pBlock);
+ bool limitReached = applyLimitOffset(pLimitInfo, pBlock, pOperator->pTaskInfo);
+ if (pBlock->info.rows == 0) {
return PROJECT_RETRIEVE_CONTINUE;
- } else if (pLimitInfo->remainOffset < pBlock->info.rows && pLimitInfo->remainOffset > 0) {
- blockDataTrimFirstNRows(pBlock, pLimitInfo->remainOffset);
- pLimitInfo->remainOffset = 0;
- }
-
- // check for the limitation in each group
- if (pLimitInfo->limit.limit >= 0 && pLimitInfo->numOfOutputRows + pBlock->info.rows >= pLimitInfo->limit.limit) {
- int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows);
- blockDataKeepFirstNRows(pBlock, keepRows);
-
- // TODO: optimize it later when partition by + limit
- // all retrieved requirement has been fulfilled, let's finish this
- if ((pLimitInfo->slimit.limit == -1 && pLimitInfo->currentGroupId == 0) ||
- (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
+ } else {
+ if (limitReached && (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
setOperatorCompleted(pOperator);
- } else {
- // Even current group is done, there may be many vgroups remain existed, and we need to continue to retrieve data
- // from next group. So let's continue this retrieve process
- if (keepRows == 0) {
- return PROJECT_RETRIEVE_CONTINUE;
- }
}
}
- pLimitInfo->numOfOutputRows += pBlock->info.rows;
return PROJECT_RETRIEVE_DONE;
}
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index c019db1f42..121d47692c 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -256,12 +256,11 @@ static void doSetTagColumnData(STableScanBase* pTableScanInfo, SSDataBlock* pBlo
}
}
-// todo handle the slimit info
-bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo, SOperatorInfo* pOperator) {
+bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) {
SLimit* pLimit = &pLimitInfo->limit;
const char* id = GET_TASKID(pTaskInfo);
- if (pLimit->offset > 0 && pLimitInfo->remainOffset > 0) {
+ if (pLimitInfo->remainOffset > 0) {
if (pLimitInfo->remainOffset >= pBlock->info.rows) {
pLimitInfo->remainOffset -= pBlock->info.rows;
blockDataEmpty(pBlock);
@@ -276,12 +275,14 @@ bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo
if (pLimit->limit != -1 && pLimit->limit <= (pLimitInfo->numOfOutputRows + pBlock->info.rows)) {
// limit the output rows
int32_t keep = (int32_t)(pLimit->limit - pLimitInfo->numOfOutputRows);
-
blockDataKeepFirstNRows(pBlock, keep);
+
+ pLimitInfo->numOfOutputRows += pBlock->info.rows;
qDebug("output limit %" PRId64 " has reached, %s", pLimit->limit, id);
return true;
}
+ pLimitInfo->numOfOutputRows += pBlock->info.rows;
return false;
}
@@ -397,13 +398,12 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
}
}
- bool limitReached = applyLimitOffset(&pTableScanInfo->limitInfo, pBlock, pTaskInfo, pOperator);
+ bool limitReached = applyLimitOffset(&pTableScanInfo->limitInfo, pBlock, pTaskInfo);
if (limitReached) { // set operator flag is done
setOperatorCompleted(pOperator);
}
pCost->totalRows += pBlock->info.rows;
- pTableScanInfo->limitInfo.numOfOutputRows = pCost->totalRows;
return TSDB_CODE_SUCCESS;
}
@@ -2714,9 +2714,7 @@ SSDataBlock* getSortedTableMergeScanBlockData(SSortHandle* pHandle, SSDataBlock*
}
}
- applyLimitOffset(&pInfo->limitInfo, pResBlock, pTaskInfo, pOperator);
- pInfo->limitInfo.numOfOutputRows += pResBlock->info.rows;
-
+ applyLimitOffset(&pInfo->limitInfo, pResBlock, pTaskInfo);
qDebug("%s get sorted row block, rows:%d, limit:%"PRId64, GET_TASKID(pTaskInfo), pResBlock->info.rows,
pInfo->limitInfo.numOfOutputRows);
diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c
index 97b4fd9dc4..98ef6b8a36 100644
--- a/source/libs/executor/src/sortoperator.c
+++ b/source/libs/executor/src/sortoperator.c
@@ -222,6 +222,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
T_LONG_JMP(pTaskInfo->env, code);
}
+ // multi-group case is not handled here
SSDataBlock* pBlock = NULL;
while (1) {
pBlock = getSortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity,
@@ -236,28 +237,14 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
continue;
}
- // todo add the limit/offset info
- if (pInfo->limitInfo.remainOffset > 0) {
- if (pInfo->limitInfo.remainOffset >= blockDataGetNumOfRows(pBlock)) {
- pInfo->limitInfo.remainOffset -= pBlock->info.rows;
- continue;
- }
-
- blockDataTrimFirstNRows(pBlock, pInfo->limitInfo.remainOffset);
- pInfo->limitInfo.remainOffset = 0;
+ // FIXME: there may be bugs in the limit/offset handling here
+ bool limitReached = applyLimitOffset(&pInfo->limitInfo, pBlock, pTaskInfo);
+ if (limitReached) {
+ resetLimitInfoForNextGroup(&pInfo->limitInfo);
}
- if (pInfo->limitInfo.limit.limit > 0 &&
- pInfo->limitInfo.limit.limit <= pInfo->limitInfo.numOfOutputRows + blockDataGetNumOfRows(pBlock)) {
- int32_t remain = pInfo->limitInfo.limit.limit - pInfo->limitInfo.numOfOutputRows;
- blockDataKeepFirstNRows(pBlock, remain);
- }
-
- size_t numOfRows = blockDataGetNumOfRows(pBlock);
- pInfo->limitInfo.numOfOutputRows += numOfRows;
- pOperator->resultInfo.totalRows += numOfRows;
-
- if (numOfRows > 0) {
+ pOperator->resultInfo.totalRows += pBlock->info.rows;
+ if (pBlock->info.rows > 0) {
break;
}
}
@@ -557,7 +544,6 @@ typedef struct SMultiwayMergeOperatorInfo {
SSDataBlock* pIntermediateBlock; // to hold the intermediate result
int64_t startTs; // sort start time
bool groupSort;
- bool hasGroupId;
uint64_t groupId;
STupleHandle* prefetchedTuple;
} SMultiwayMergeOperatorInfo;
@@ -604,7 +590,9 @@ int32_t openMultiwayMergeOperator(SOperatorInfo* pOperator) {
return TSDB_CODE_SUCCESS;
}
-static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle* pHandle, int32_t capacity, SSDataBlock* p) {
+static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle* pHandle, int32_t capacity,
+ SSDataBlock* p, bool* newgroup) {
+ *newgroup = false;
while (1) {
STupleHandle* pTupleHandle = NULL;
@@ -613,8 +601,12 @@ static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle*
pTupleHandle = tsortNextTuple(pHandle);
} else {
pTupleHandle = pInfo->prefetchedTuple;
- pInfo->groupId = tsortGetGroupId(pTupleHandle);
pInfo->prefetchedTuple = NULL;
+ uint64_t gid = tsortGetGroupId(pTupleHandle);
+ if (gid != pInfo->groupId) {
+ *newgroup = true;
+ pInfo->groupId = gid;
+ }
}
} else {
pTupleHandle = tsortNextTuple(pHandle);
@@ -627,12 +619,10 @@ static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle*
if (pInfo->groupSort) {
uint64_t tupleGroupId = tsortGetGroupId(pTupleHandle);
- if (!pInfo->hasGroupId) {
+ if (pInfo->groupId == 0 || pInfo->groupId == tupleGroupId) {
+ appendOneRowToDataBlock(p, pTupleHandle);
+ p->info.id.groupId = tupleGroupId;
pInfo->groupId = tupleGroupId;
- pInfo->hasGroupId = true;
- appendOneRowToDataBlock(p, pTupleHandle);
- } else if (pInfo->groupId == tupleGroupId) {
- appendOneRowToDataBlock(p, pTupleHandle);
} else {
pInfo->prefetchedTuple = pTupleHandle;
break;
@@ -645,11 +635,6 @@ static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle*
break;
}
}
-
- if (pInfo->groupSort) {
- pInfo->hasGroupId = false;
- }
-
}
SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, SArray* pColMatchInfo,
@@ -673,14 +658,19 @@ SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pData
}
SSDataBlock* p = pInfo->pIntermediateBlock;
+ bool newgroup = false;
while (1) {
- doGetSortedBlockData(pInfo, pHandle, capacity, p);
+ doGetSortedBlockData(pInfo, pHandle, capacity, p, &newgroup);
if (p->info.rows == 0) {
break;
}
- bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo, pOperator);
+ if (newgroup) {
+ resetLimitInfoForNextGroup(&pInfo->limitInfo);
+ }
+
+ bool limitReached = applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo);
if (limitReached) {
resetLimitInfoForNextGroup(&pInfo->limitInfo);
}
diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c
index f5ceeeafe7..7cb49c8f54 100644
--- a/source/libs/executor/src/timesliceoperator.c
+++ b/source/libs/executor/src/timesliceoperator.c
@@ -440,6 +440,11 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
break;
}
+ if (pSliceInfo->scalarSup.pExprInfo != NULL) {
+ SExprSupp* pExprSup = &pSliceInfo->scalarSup;
+ projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
+ }
+
int32_t code = initKeeperInfo(pSliceInfo, pBlock);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
@@ -538,6 +543,8 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision);
}
+ doFilter(pResBlock, pOperator->exprSupp.pFilterInfo, NULL);
+
// restore the value
setTaskStatus(pOperator->pTaskInfo, TASK_COMPLETED);
if (pResBlock->info.rows == 0) {
@@ -573,6 +580,11 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode
}
}
+ code = filterInitFromNode((SNode*)pInterpPhyNode->node.pConditions, &pOperator->exprSupp.pFilterInfo, 0);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
pInfo->tsCol = extractColumnFromColumnNode((SColumnNode*)pInterpPhyNode->pTimeSeries);
pInfo->fillType = convertFillType(pInterpPhyNode->fillMode);
initResultSizeInfo(&pOperator->resultInfo, 4096);
@@ -629,6 +641,7 @@ void destroyTimeSliceOperatorInfo(void* param) {
taosMemoryFree(pKey->end.val);
}
taosArrayDestroy(pInfo->pLinearInfo);
+ cleanupExprSupp(&pInfo->scalarSup);
taosMemoryFree(pInfo->pFillColInfo);
taosMemoryFreeClear(param);
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index 648580b913..38108dcff6 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -2445,7 +2445,19 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* p
pInfo->delKey = key;
}
int32_t prevEndPos = (forwardRows - 1) * step + startPos;
- ASSERT(pSDataBlock->info.window.skey > 0 && pSDataBlock->info.window.ekey > 0);
+ if (pSDataBlock->info.window.skey <= 0 || pSDataBlock->info.window.ekey <= 0) {
+ qError("table uid %" PRIu64 " data block timestamp range may not be calculated! minKey %" PRId64
+ ",maxKey %" PRId64,
+ pSDataBlock->info.id.uid, pSDataBlock->info.window.skey, pSDataBlock->info.window.ekey);
+ blockDataUpdateTsWindow(pSDataBlock, 0);
+
+ // timestamp of the data is incorrect
+ if (pSDataBlock->info.window.skey <= 0 || pSDataBlock->info.window.ekey <= 0) {
+ qError("table uid %" PRIu64 " data block timestamp is out of range! minKey %" PRId64 ",maxKey %" PRId64,
+ pSDataBlock->info.id.uid, pSDataBlock->info.window.skey, pSDataBlock->info.window.ekey);
+ }
+ }
+
if (IS_FINAL_OP(pInfo)) {
startPos = getNextQualifiedFinalWindow(&pInfo->interval, &nextWin, &pSDataBlock->info, tsCols, prevEndPos);
} else {
diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c
index 3c5d4d506a..630b61bd4b 100644
--- a/source/libs/parser/src/parInsertSql.c
+++ b/source/libs/parser/src/parInsertSql.c
@@ -1693,6 +1693,9 @@ static int32_t getTableSchemaFromMetaData(SInsertParseContext* pCxt, const SMeta
if (TSDB_CODE_SUCCESS == code && !isStb && TSDB_SUPER_TABLE == pStmt->pTableMeta->tableType) {
code = buildInvalidOperationMsg(&pCxt->msg, "insert data into super table is not supported");
}
+ if (TSDB_CODE_SUCCESS == code && isStb) {
+ code = storeTableMeta(pCxt, pStmt);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = getTableVgroupFromMetaData(pMetaData->pTableHash, pStmt, isStb);
}
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 4b3f807a53..ae3f5226f5 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -1550,11 +1550,14 @@ static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* p
// select percentile() without from clause is also valid
if ((NULL != pTable && (QUERY_NODE_REAL_TABLE != nodeType(pTable) ||
(TSDB_CHILD_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType &&
- TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType))) ||
- NULL != pSelect->pPartitionByList) {
+ TSDB_NORMAL_TABLE != ((SRealTableNode*)pTable)->pMeta->tableType)))) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE,
"%s is only supported in single table query", pFunc->functionName);
}
+ if (NULL != pSelect->pPartitionByList) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC,
+ "%s function is not supported in partition query", pFunc->functionName);
+ }
return TSDB_CODE_SUCCESS;
}
@@ -2979,7 +2982,7 @@ static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode*
intervalRange = pInterval->datum.i;
}
- if ((timeRange == 0) || (timeRange / intervalRange) >= MAX_INTERVAL_TIME_WINDOW) {
+ if ((timeRange / intervalRange) >= MAX_INTERVAL_TIME_WINDOW) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE);
}
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index 001ec66725..208f548457 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -1055,7 +1055,7 @@ static int32_t createProjectLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSel
TSWAP(pProject->node.pLimit, pSelect->pLimit);
TSWAP(pProject->node.pSlimit, pSelect->pSlimit);
- pProject->ignoreGroupId = (NULL == pSelect->pPartitionByList);
+ pProject->ignoreGroupId = pSelect->isSubquery ? true : (NULL == pSelect->pPartitionByList);
pProject->node.groupAction =
(!pSelect->isSubquery && pCxt->pPlanCxt->streamQuery) ? GROUP_ACTION_KEEP : GROUP_ACTION_CLEAR;
pProject->node.requireDataOrder = DATA_ORDER_LEVEL_NONE;
diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index 6208fc172a..361cf33d58 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -348,7 +348,8 @@ static bool stbSplIsPartTableAgg(SAggLogicNode* pAgg) {
return false;
}
if (NULL != pAgg->pGroupKeys) {
- return stbSplHasPartTbname(pAgg->pGroupKeys) && stbSplNotSystemScan((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0));
+ return stbSplHasPartTbname(pAgg->pGroupKeys) &&
+ stbSplNotSystemScan((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0));
}
return stbSplHasPartTbname(stbSplGetPartKeys((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0)));
}
@@ -559,6 +560,8 @@ static int32_t stbSplCreateMergeNode(SSplitContext* pCxt, SLogicSubplan* pSubpla
if (NULL == pMerge->node.pLimit) {
code = TSDB_CODE_OUT_OF_MEMORY;
}
+ ((SLimitNode*)pSplitNode->pLimit)->limit += ((SLimitNode*)pSplitNode->pLimit)->offset;
+ ((SLimitNode*)pSplitNode->pLimit)->offset = 0;
}
if (TSDB_CODE_SUCCESS == code) {
if (NULL == pSubplan) {
@@ -1039,21 +1042,29 @@ static int32_t stbSplSplitSortNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
return code;
}
-static int32_t stbSplSplitScanNodeWithoutPartTags(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
- SLogicNode* pSplitNode = pInfo->pSplitNode;
+static int32_t stbSplGetSplitNodeForScan(SStableSplitInfo* pInfo, SLogicNode** pSplitNode) {
+ *pSplitNode = pInfo->pSplitNode;
if (NULL != pInfo->pSplitNode->pParent && QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) &&
NULL == pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit) {
- pSplitNode = pInfo->pSplitNode->pParent;
+ *pSplitNode = pInfo->pSplitNode->pParent;
if (NULL != pInfo->pSplitNode->pLimit) {
- pSplitNode->pLimit = nodesCloneNode(pInfo->pSplitNode->pLimit);
- if (NULL == pSplitNode->pLimit) {
+ (*pSplitNode)->pLimit = nodesCloneNode(pInfo->pSplitNode->pLimit);
+ if (NULL == (*pSplitNode)->pLimit) {
return TSDB_CODE_OUT_OF_MEMORY;
}
((SLimitNode*)pInfo->pSplitNode->pLimit)->limit += ((SLimitNode*)pInfo->pSplitNode->pLimit)->offset;
((SLimitNode*)pInfo->pSplitNode->pLimit)->offset = 0;
}
}
- int32_t code = splCreateExchangeNodeForSubplan(pCxt, pInfo->pSubplan, pSplitNode, SUBPLAN_TYPE_MERGE);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t stbSplSplitScanNodeWithoutPartTags(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
+ SLogicNode* pSplitNode = NULL;
+ int32_t code = stbSplGetSplitNodeForScan(pInfo, &pSplitNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = splCreateExchangeNodeForSubplan(pCxt, pInfo->pSubplan, pSplitNode, SUBPLAN_TYPE_MERGE);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
(SNode*)splCreateScanSubplan(pCxt, pSplitNode, SPLIT_FLAG_STABLE_SPLIT));
@@ -1063,12 +1074,11 @@ static int32_t stbSplSplitScanNodeWithoutPartTags(SSplitContext* pCxt, SStableSp
}
static int32_t stbSplSplitScanNodeWithPartTags(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
- SLogicNode* pSplitNode = pInfo->pSplitNode;
- if (NULL != pInfo->pSplitNode->pParent && QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) &&
- NULL == pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit) {
- pSplitNode = pInfo->pSplitNode->pParent;
+ SLogicNode* pSplitNode = NULL;
+ int32_t code = stbSplGetSplitNodeForScan(pInfo, &pSplitNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pSplitNode, NULL, pSplitNode, true);
}
- int32_t code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pSplitNode, NULL, pSplitNode, true);
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
(SNode*)splCreateScanSubplan(pCxt, pSplitNode, SPLIT_FLAG_STABLE_SPLIT));
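The planner hunks above both apply the standard rule for pushing a LIMIT with an OFFSET below a newly created merge point: the child subplan cannot apply the offset itself, because rows from sibling children may sort ahead of its own, so the pushed-down copy over-fetches `limit + offset` rows and zeroes the offset, leaving the merging parent to skip the first `offset` rows exactly once. A minimal sketch of that rule, using an illustrative struct rather than the planner's real `SLimitNode`:

```c
/* Limit pushdown below a merge node (illustrative sketch, not the
 * planner's actual types). The child must return limit+offset rows;
 * only the merging parent may discard the first `offset` of them. */
typedef struct {
  long limit;
  long offset;
} Limit;

static void foldOffsetIntoPushedLimit(Limit *pushed) {
  pushed->limit += pushed->offset; /* over-fetch so no needed row is lost */
  pushed->offset = 0;              /* offset is applied once, at the parent */
}
```

Both call sites in the diff, `stbSplCreateMergeNode` and the new `stbSplGetSplitNodeForScan`, perform exactly this fold on the limit left below the split.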
diff --git a/source/libs/qworker/inc/qwInt.h b/source/libs/qworker/inc/qwInt.h
index 787ef7501d..9553cb97b5 100644
--- a/source/libs/qworker/inc/qwInt.h
+++ b/source/libs/qworker/inc/qwInt.h
@@ -194,6 +194,8 @@ typedef struct SQWorker {
SMsgCb msgCb;
SQWStat stat;
int32_t *destroyed;
+
+ int8_t nodeStopped;
} SQWorker;
typedef struct SQWorkerMgmt {
diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c
index fdd2775daa..7ee7c50c96 100644
--- a/source/libs/qworker/src/qwUtil.c
+++ b/source/libs/qworker/src/qwUtil.c
@@ -213,9 +213,15 @@ int32_t qwAcquireTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
QW_SET_QTID(id, qId, tId, eId);
*ctx = taosHashAcquire(mgmt->ctxHash, id, sizeof(id));
+ int8_t nodeStopped = atomic_load_8(&mgmt->nodeStopped);
if (NULL == (*ctx)) {
- QW_TASK_DLOG_E("task ctx not exist, may be dropped");
- QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
+ if (!nodeStopped) {
+ QW_TASK_DLOG_E("task ctx not exist, may be dropped");
+ QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
+ } else {
+ QW_TASK_DLOG_E("node stopped");
+ QW_ERR_RET(TSDB_CODE_VND_STOPPED);
+ }
}
return TSDB_CODE_SUCCESS;
@@ -226,9 +232,16 @@ int32_t qwGetTaskCtx(QW_FPARAMS_DEF, SQWTaskCtx **ctx) {
QW_SET_QTID(id, qId, tId, eId);
*ctx = taosHashGet(mgmt->ctxHash, id, sizeof(id));
+ int8_t nodeStopped = atomic_load_8(&mgmt->nodeStopped);
+
if (NULL == (*ctx)) {
- QW_TASK_DLOG_E("task ctx not exist, may be dropped");
- QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
+ if (!nodeStopped) {
+ QW_TASK_DLOG_E("task ctx not exist, may be dropped");
+ QW_ERR_RET(TSDB_CODE_QRY_TASK_CTX_NOT_EXIST);
+ } else {
+ QW_TASK_DLOG_E("node stopped");
+ QW_ERR_RET(TSDB_CODE_VND_STOPPED);
+ }
}
return TSDB_CODE_SUCCESS;
diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c
index 2f712e6eba..1e35529d27 100644
--- a/source/libs/qworker/src/qworker.c
+++ b/source/libs/qworker/src/qworker.c
@@ -1187,6 +1187,9 @@ void qWorkerStopAllTasks(void *qWorkerMgmt) {
uint64_t qId, tId, sId;
int32_t eId;
int64_t rId = 0;
+
+ atomic_store_8(&mgmt->nodeStopped, 1);
+
void *pIter = taosHashIterate(mgmt->ctxHash, NULL);
while (pIter) {
SQWTaskCtx *ctx = (SQWTaskCtx *)pIter;
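The qworker changes work as a pair: `qWorkerStopAllTasks` publishes the new `nodeStopped` flag before it starts dropping task contexts, and the two lookup paths in `qwUtil.c` consult it on a miss, so a caller racing with shutdown sees `TSDB_CODE_VND_STOPPED` instead of the misleading `TSDB_CODE_QRY_TASK_CTX_NOT_EXIST`. A sketch of the pattern with C11 atomics; the names are illustrative, not the real qworker API:

```c
#include <stdatomic.h>
#include <stddef.h>

typedef struct {
  atomic_int stopped; /* written once by the stopper, read by workers */
  /* ... task context hash table ... */
} Mgmt;

/* Distinguish "this task was dropped" from "the whole node is stopping". */
int acquireTask(Mgmt *mgmt, void **ctx) {
  *ctx = NULL; /* hash lookup elided */
  if (*ctx == NULL) {
    return atomic_load(&mgmt->stopped) ? -2  /* node stopped  */
                                       : -1; /* ctx not found */
  }
  return 0;
}

void stopAllTasks(Mgmt *mgmt) {
  atomic_store(&mgmt->stopped, 1); /* publish before any ctx is removed */
  /* iterate the hash and drop every task context ... */
}
```

Because the store happens before the first context is removed, any lookup miss observed with the flag set can reasonably be reported as shutdown rather than as a dropped task.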
diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c
index e26c8a5c5b..56c3c3ebef 100644
--- a/source/libs/stream/src/streamState.c
+++ b/source/libs/stream/src/streamState.c
@@ -199,7 +199,7 @@ void streamStateClose(SStreamState* pState) {
}
int32_t streamStateBegin(SStreamState* pState) {
- if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, tdbDefaultMalloc, tdbDefaultFree, NULL,
+ if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, NULL, NULL, NULL,
TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
tdbAbort(pState->pTdbState->db, pState->pTdbState->txn);
return -1;
@@ -215,7 +215,7 @@ int32_t streamStateCommit(SStreamState* pState) {
return -1;
}
- if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, tdbDefaultMalloc, tdbDefaultFree, NULL,
+ if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, NULL, NULL, NULL,
TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
return -1;
}
@@ -227,7 +227,7 @@ int32_t streamStateAbort(SStreamState* pState) {
return -1;
}
- if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, tdbDefaultMalloc, tdbDefaultFree, NULL,
+ if (tdbBegin(pState->pTdbState->db, &pState->pTdbState->txn, NULL, NULL, NULL,
TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) < 0) {
return -1;
}
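The stream-state change drops the explicit `tdbDefaultMalloc`/`tdbDefaultFree` arguments in favor of NULL, presumably letting `tdbBegin` fall back to its own defaults so call sites stay decoupled from the allocator's name; that fallback is an assumption about tdb's contract, not something visible in this diff. The general NULL-means-default convention looks like this (illustrative sketch, not the tdb API):

```c
#include <stdlib.h>

typedef void *(*txn_malloc_fn)(size_t);
typedef void (*txn_free_fn)(void *);

/* Hypothetical transaction begin: NULL hooks select the library's
 * built-in allocator, so callers need not name the defaults. */
int txnBegin(txn_malloc_fn mallocFn, txn_free_fn freeFn) {
  if (mallocFn == NULL) mallocFn = malloc; /* library default */
  if (freeFn == NULL) freeFn = free;
  /* ... open the write transaction using mallocFn/freeFn ... */
  return 0;
}
```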
diff --git a/source/libs/sync/inc/syncSnapshot.h b/source/libs/sync/inc/syncSnapshot.h
index 5277e7818f..063b4f51f5 100644
--- a/source/libs/sync/inc/syncSnapshot.h
+++ b/source/libs/sync/inc/syncSnapshot.h
@@ -24,7 +24,7 @@ extern "C" {
#define SYNC_SNAPSHOT_SEQ_INVALID -2
#define SYNC_SNAPSHOT_SEQ_FORCE_CLOSE -3
-#define SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT -1
+#define SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT -1
#define SYNC_SNAPSHOT_SEQ_BEGIN 0
#define SYNC_SNAPSHOT_SEQ_END 0x7FFFFFFF
diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c
index 6cc517fda0..bb3bb0d6a4 100644
--- a/source/libs/sync/src/syncPipeline.c
+++ b/source/libs/sync/src/syncPipeline.c
@@ -830,7 +830,7 @@ int32_t syncLogReplMgrReplicateProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, Sy
pMgr->endIndex = index + 1;
SSyncLogBuffer* pBuf = pNode->pLogBuf;
- sInfo("vgId:%d, probe peer:%" PRIx64 " with msg of index:%" PRId64 " term: %" PRId64 ". mgr (rs:%d): [%" PRId64
+ sTrace("vgId:%d, probe peer:%" PRIx64 " with msg of index:%" PRId64 " term: %" PRId64 ". mgr (rs:%d): [%" PRId64
" %" PRId64 ", %" PRId64 "), buffer: [%" PRId64 " %" PRId64 " %" PRId64 ", %" PRId64 ")",
pNode->vgId, pDestId->addr, index, term, pMgr->restored, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex,
pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex);
diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c
index 880c76e4dd..e61bcc9ffc 100644
--- a/source/libs/sync/src/syncSnapshot.c
+++ b/source/libs/sync/src/syncSnapshot.c
@@ -112,7 +112,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender) {
pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex;
pMsg->lastConfig = pSender->lastConfig;
pMsg->startTime = pSender->startTime;
- pMsg->seq = SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT;
+ pMsg->seq = SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT;
// event log
syncLogSendSyncSnapshotSend(pSender->pSyncNode, pMsg, "snapshot sender start");
@@ -379,7 +379,7 @@ void snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *p
}
pReceiver->start = true;
- pReceiver->ack = SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT;
+ pReceiver->ack = SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT;
pReceiver->term = pReceiver->pSyncNode->raftStore.currentTerm;
pReceiver->fromId = pPreMsg->srcId;
pReceiver->startTime = pPreMsg->startTime;
@@ -527,7 +527,7 @@ SyncIndex syncNodeGetSnapBeginIndex(SSyncNode *ths) {
return snapStart;
}
-static int32_t syncNodeOnSnapshotPre(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
+static int32_t syncNodeOnSnapshotPrep(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
SSyncSnapshotReceiver *pReceiver = pSyncNode->pNewNodeReceiver;
int64_t timeNow = taosGetTimestampMs();
int32_t code = 0;
@@ -565,7 +565,7 @@ _START_RECEIVER:
} else {
// waiting for clock match
while (timeNow < pMsg->startTime) {
- sRInfo(pReceiver, "snapshot receiver pre waitting for true time, now:%" PRId64 ", stime:%" PRId64, timeNow,
+ sRInfo(pReceiver, "snapshot receiver pre waitting for true time, now:%" PRId64 ", startTime:%" PRId64, timeNow,
pMsg->startTime);
taosMsleep(10);
timeNow = taosGetTimestampMs();
@@ -765,7 +765,7 @@ static int32_t syncNodeOnSnapshotEnd(SSyncNode *pSyncNode, SyncSnapshotSend *pMs
// receiver on message
//
-// condition 1, recv SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT
+// condition 1, recv SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT
// if receiver already start
// if sender.start-time > receiver.start-time, restart receiver(reply snapshot start)
// if sender.start-time = receiver.start-time, maybe duplicate msg
@@ -809,9 +809,9 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) {
int32_t code = 0;
if (pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER) {
if (pMsg->term == pSyncNode->raftStore.currentTerm) {
- if (pMsg->seq == SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT) {
+ if (pMsg->seq == SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT) {
syncLogRecvSyncSnapshotSend(pSyncNode, pMsg, "process seq pre-snapshot");
- code = syncNodeOnSnapshotPre(pSyncNode, pMsg);
+ code = syncNodeOnSnapshotPrep(pSyncNode, pMsg);
} else if (pMsg->seq == SYNC_SNAPSHOT_SEQ_BEGIN) {
syncLogRecvSyncSnapshotSend(pSyncNode, pMsg, "process seq begin");
code = syncNodeOnSnapshotBegin(pSyncNode, pMsg);
@@ -848,7 +848,7 @@ int32_t syncNodeOnSnapshot(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) {
return code;
}
-static int32_t syncNodeOnSnapshotPreRsp(SSyncNode *pSyncNode, SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg) {
+static int32_t syncNodeOnSnapshotPrepRsp(SSyncNode *pSyncNode, SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg) {
SSnapshot snapshot = {0};
pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot);
@@ -945,8 +945,8 @@ int32_t syncNodeOnSnapshotRsp(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) {
if (pMsg->startTime != pSender->startTime) {
syncLogRecvSyncSnapshotRsp(pSyncNode, pMsg, "snapshot sender and receiver time not match");
- sSError(pSender, "sender:%" PRId64 " receiver:%" PRId64 " time not match, code:0x%x", pMsg->startTime,
- pSender->startTime, pMsg->code);
+ sSError(pSender, "sender:%" PRId64 " receiver:%" PRId64 " time not match, error:%s 0x%x", pMsg->startTime,
+ pSender->startTime, tstrerror(pMsg->code), pMsg->code);
terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
goto _ERROR;
}
@@ -961,15 +961,15 @@ int32_t syncNodeOnSnapshotRsp(SSyncNode *pSyncNode, const SRpcMsg *pRpcMsg) {
if (pMsg->code != 0) {
syncLogRecvSyncSnapshotRsp(pSyncNode, pMsg, "receive error code");
- sSError(pSender, "snapshot sender receive error code:0x%x and stop sender", pMsg->code);
+ sSError(pSender, "snapshot sender receive error:%s 0x%x and stop sender", tstrerror(pMsg->code), pMsg->code);
terrno = pMsg->code;
goto _ERROR;
}
// prepare , send begin msg
- if (pMsg->ack == SYNC_SNAPSHOT_SEQ_PRE_SNAPSHOT) {
+ if (pMsg->ack == SYNC_SNAPSHOT_SEQ_PREP_SNAPSHOT) {
syncLogRecvSyncSnapshotRsp(pSyncNode, pMsg, "process seq pre-snapshot");
- return syncNodeOnSnapshotPreRsp(pSyncNode, pSender, pMsg);
+ return syncNodeOnSnapshotPrepRsp(pSyncNode, pSender, pMsg);
}
if (pSender->pReader == NULL || pSender->finish) {
diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c
index b246d9a79d..6a50572cba 100644
--- a/source/libs/sync/src/syncUtil.c
+++ b/source/libs/sync/src/syncUtil.c
@@ -141,20 +141,15 @@ static void syncLogReplMgrStates2Str(SSyncNode* pSyncNode, char* buf, int32_t bu
}
static void syncPeerState2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLen) {
- int32_t len = 1;
-
+ int32_t len = 0;
+ len += snprintf(buf + len, bufLen - len, "%s", "{");
for (int32_t i = 0; i < pSyncNode->replicaNum; ++i) {
SPeerState* pState = syncNodeGetPeerState(pSyncNode, &(pSyncNode->replicasId[i]));
if (pState == NULL) break;
-
- if (i < pSyncNode->replicaNum - 1) {
- len += snprintf(buf + len, bufLen - len, "%d:%" PRId64 " %" PRId64 ", ", i, pState->lastSendIndex,
- pState->lastSendTime);
- } else {
- len += snprintf(buf + len, bufLen - len, "%d:%" PRId64 " %" PRId64 "}", i, pState->lastSendIndex,
- pState->lastSendTime);
- }
+ len += snprintf(buf + len, bufLen - len, "%d:%" PRId64 " %" PRId64 "%s", i, pState->lastSendIndex,
+ pState->lastSendTime, (i < pSyncNode->replicaNum - 1) ? ", " : "");
}
+ len += snprintf(buf + len, bufLen - len, "%s", "}");
}
void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNode* pNode, const char* format, ...) {
@@ -245,7 +240,7 @@ void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dfla
char cfgStr[1024] = "";
syncCfg2SimpleStr(&pNode->raftCfg.cfg, cfgStr, sizeof(cfgStr));
- char peerStr[1024] = "{";
+ char peerStr[1024] = "";
syncPeerState2Str(pNode, peerStr, sizeof(peerStr));
char eventLog[512]; // {0};
@@ -255,20 +250,21 @@ void syncPrintSnapshotSenderLog(const char* flags, ELogLevel level, int32_t dfla
va_end(argpointer);
taosPrintLog(flags, level, dflag,
- "vgId:%d, %s, sync:%s, {%p s-param:%" PRId64 " e-param:%" PRId64 " laindex:%" PRId64 " laterm:%" PRIu64
- " lcindex:%" PRId64
- " seq:%d ack:%d finish:%d replica-index:%d dnode:%d}"
- ", tm:%" PRIu64 ", cmt:%" PRId64 ", fst:%" PRId64 ", lst:%" PRId64 ", min:%" PRId64 ", snap:%" PRId64
- ", snap-tm:%" PRIu64 ", sby:%d, stgy:%d, bch:%d, r-num:%d, lcfg:%" PRId64
- ", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s, %s",
+ "vgId:%d, %s, sync:%s, snap-sender:{%p start:%" PRId64 " end:%" PRId64 " last-index:%" PRId64
+ " last-term:%" PRIu64 " last-cfg:%" PRId64
+ ", seq:%d ack:%d finish:%d, as:%d dnode:%d}"
+ ", term:%" PRIu64 ", commit-index:%" PRId64 ", firstver:%" PRId64 ", lastver:%" PRId64
+ ", min-match:%" PRId64 ", snap:{last-index:%" PRId64 ", term:%" PRIu64
+ "}, standby:%d, batch-sz:%d, replicas:%d, last-cfg:%" PRId64
+ ", chging:%d, restore:%d, quorum:%d, lc-timer:{elect:%" PRId64 ", hb:%" PRId64 "}, peer:%s, cfg:%s",
pNode->vgId, eventLog, syncStr(pNode->state), pSender, pSender->snapshotParam.start,
pSender->snapshotParam.end, pSender->snapshot.lastApplyIndex, pSender->snapshot.lastApplyTerm,
pSender->snapshot.lastConfigIndex, pSender->seq, pSender->ack, pSender->finish, pSender->replicaIndex,
DID(&pNode->replicasId[pSender->replicaIndex]), pNode->raftStore.currentTerm, pNode->commitIndex,
logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm,
- pNode->raftCfg.isStandBy, pNode->raftCfg.snapshotStrategy, pNode->raftCfg.batchSize, pNode->replicaNum,
- pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode),
- pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr);
+ pNode->raftCfg.isStandBy, pNode->raftCfg.batchSize, pNode->replicaNum, pNode->raftCfg.lastConfigIndex,
+ pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock,
+ pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr);
}
void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t dflag, SSyncSnapshotReceiver* pReceiver,
@@ -291,7 +287,7 @@ void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t df
char cfgStr[1024] = "";
syncCfg2SimpleStr(&pNode->raftCfg.cfg, cfgStr, sizeof(cfgStr));
- char peerStr[1024] = "{";
+ char peerStr[1024] = "";
syncPeerState2Str(pNode, peerStr, sizeof(peerStr));
char eventLog[512]; // {0};
@@ -300,22 +296,22 @@ void syncPrintSnapshotReceiverLog(const char* flags, ELogLevel level, int32_t df
int32_t writeLen = vsnprintf(eventLog, sizeof(eventLog), format, argpointer);
va_end(argpointer);
- taosPrintLog(flags, level, dflag,
- "vgId:%d, %s, sync:%s,"
- " {%p start:%d ack:%d term:%" PRIu64 " start-time:%" PRId64 " from dnode:%d s-param:%" PRId64
- " e-param:%" PRId64 " laindex:%" PRId64 " laterm:%" PRIu64 " lcindex:%" PRId64
- "}"
- ", tm:%" PRIu64 ", cmt:%" PRId64 ", fst:%" PRId64 ", lst:%" PRId64 ", min:%" PRId64 ", snap:%" PRId64
- ", snap-tm:%" PRIu64 ", sby:%d, stgy:%d, bch:%d, r-num:%d, lcfg:%" PRId64
- ", chging:%d, rsto:%d, dquorum:%d, elt:%" PRId64 ", hb:%" PRId64 ", %s, %s",
- pNode->vgId, eventLog, syncStr(pNode->state), pReceiver, pReceiver->start, pReceiver->ack,
- pReceiver->term, pReceiver->startTime, DID(&pReceiver->fromId), pReceiver->snapshotParam.start,
- pReceiver->snapshotParam.end, pReceiver->snapshot.lastApplyIndex, pReceiver->snapshot.lastApplyTerm,
- pReceiver->snapshot.lastConfigIndex, pNode->raftStore.currentTerm, pNode->commitIndex, logBeginIndex,
- logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm,
- pNode->raftCfg.isStandBy, pNode->raftCfg.snapshotStrategy, pNode->raftCfg.batchSize, pNode->replicaNum,
- pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode),
- pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr);
+ taosPrintLog(
+ flags, level, dflag,
+ "vgId:%d, %s, sync:%s,"
+ " snap-receiver:{%p started:%d acked:%d term:%" PRIu64 " start-time:%" PRId64 " from-dnode:%d, start:%" PRId64
+ " end:%" PRId64 " last-index:%" PRId64 " last-term:%" PRIu64 " last-cfg:%" PRId64
+ "}"
+ ", term:%" PRIu64 ", commit-index:%" PRId64 ", firstver:%" PRId64 ", lastver:%" PRId64 ", min-match:%" PRId64
+ ", snap:{last-index:%" PRId64 ", last-term:%" PRIu64 "}, standby:%d, batch-sz:%d, replicas:%d, last-cfg:%" PRId64
+ ", chging:%d, restore:%d, quorum:%d, lc-timers:{elect:%" PRId64 ", hb:%" PRId64 "}, peer:%s, cfg:%s",
+ pNode->vgId, eventLog, syncStr(pNode->state), pReceiver, pReceiver->start, pReceiver->ack, pReceiver->term,
+ pReceiver->startTime, DID(&pReceiver->fromId), pReceiver->snapshotParam.start, pReceiver->snapshotParam.end,
+ pReceiver->snapshot.lastApplyIndex, pReceiver->snapshot.lastApplyTerm, pReceiver->snapshot.lastConfigIndex,
+ pNode->raftStore.currentTerm, pNode->commitIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex,
+ snapshot.lastApplyIndex, snapshot.lastApplyTerm, pNode->raftCfg.isStandBy, pNode->raftCfg.batchSize,
+ pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish,
+ syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, peerStr, cfgStr);
}
void syncLogRecvTimer(SSyncNode* pSyncNode, const SyncTimeout* pMsg, const char* s) {
@@ -351,13 +347,13 @@ void syncLogSendHeartbeat(SSyncNode* pSyncNode, const SyncHeartbeat* pMsg, bool
int64_t execTime) {
if (printX) {
sNTrace(pSyncNode,
- "send sync-heartbeat to dnode:%d {term:%" PRId64 ", cmt:%" PRId64 ", min-match:%" PRId64 ", ts:%" PRId64
- "}, x",
+ "send sync-heartbeat to dnode:%d {term:%" PRId64 ", commit-index:%" PRId64 ", min-match:%" PRId64
+ ", ts:%" PRId64 "}, x",
DID(&pMsg->destId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp);
} else {
sNTrace(pSyncNode,
- "send sync-heartbeat to dnode:%d {term:%" PRId64 ", cmt:%" PRId64 ", min-match:%" PRId64 ", ts:%" PRId64
- "}, timer-elapsed:%" PRId64 ", next-exec:%" PRId64,
+ "send sync-heartbeat to dnode:%d {term:%" PRId64 ", commit-index:%" PRId64 ", min-match:%" PRId64
+ ", ts:%" PRId64 "}, timer-elapsed:%" PRId64 ", next-exec:%" PRId64,
DID(&pMsg->destId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp, timerElapsed,
execTime);
}
@@ -368,14 +364,14 @@ void syncLogRecvHeartbeat(SSyncNode* pSyncNode, const SyncHeartbeat* pMsg, int64
pSyncNode->hbSlowNum++;
sNInfo(pSyncNode,
- "recv sync-heartbeat from dnode:%d slow {term:%" PRId64 ", cmt:%" PRId64 ", min-match:%" PRId64
+ "recv sync-heartbeat from dnode:%d slow {term:%" PRId64 ", commit-index:%" PRId64 ", min-match:%" PRId64
", ts:%" PRId64 "}, %s, net elapsed:%" PRId64,
DID(&pMsg->srcId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp, s, timeDiff);
}
sNTrace(pSyncNode,
- "recv sync-heartbeat from dnode:%d {term:%" PRId64 ", cmt:%" PRId64 ", min-match:%" PRId64 ", ts:%" PRId64
- "}, %s, net elapsed:%" PRId64,
+ "recv sync-heartbeat from dnode:%d {term:%" PRId64 ", commit-index:%" PRId64 ", min-match:%" PRId64
+ ", ts:%" PRId64 "}, %s, net elapsed:%" PRId64,
DID(&pMsg->srcId), pMsg->term, pMsg->commitIndex, pMsg->minMatchIndex, pMsg->timeStamp, s, timeDiff);
}
@@ -400,67 +396,64 @@ void syncLogRecvHeartbeatReply(SSyncNode* pSyncNode, const SyncHeartbeatReply* p
void syncLogSendSyncSnapshotSend(SSyncNode* pSyncNode, const SyncSnapshotSend* pMsg, const char* s) {
sNDebug(pSyncNode,
- "send sync-snapshot-send to dnode:%d, %s, seq:%d, term:%" PRId64 ", begin:%" PRId64 ", end:%" PRId64
- ", lterm:%" PRId64 ", stime:%" PRId64,
+ "send sync-snapshot-send to dnode:%d, %s, seq:%d, term:%" PRId64 ", begin-index:%" PRId64
+ ", last-index:%" PRId64 ", last-term:%" PRId64 ", start-time:%" PRId64,
DID(&pMsg->destId), s, pMsg->seq, pMsg->term, pMsg->beginIndex, pMsg->lastIndex, pMsg->lastTerm,
pMsg->startTime);
}
void syncLogRecvSyncSnapshotSend(SSyncNode* pSyncNode, const SyncSnapshotSend* pMsg, const char* s) {
sNDebug(pSyncNode,
- "recv sync-snapshot-send from dnode:%d, %s, seq:%d, term:%" PRId64 ", begin:%" PRId64 ", lst:%" PRId64
- ", lterm:%" PRId64 ", stime:%" PRId64 ", len:%u",
+ "recv sync-snapshot-send from dnode:%d, %s, seq:%d, term:%" PRId64 ", begin-index:%" PRId64
+ ", last-index:%" PRId64 ", last-term:%" PRId64 ", start-time:%" PRId64 ", data-len:%u",
DID(&pMsg->srcId), s, pMsg->seq, pMsg->term, pMsg->beginIndex, pMsg->lastIndex, pMsg->lastTerm,
pMsg->startTime, pMsg->dataLen);
}
void syncLogSendSyncSnapshotRsp(SSyncNode* pSyncNode, const SyncSnapshotRsp* pMsg, const char* s) {
sNDebug(pSyncNode,
- "send sync-snapshot-rsp to dnode:%d, %s, ack:%d, term:%" PRId64 ", begin:%" PRId64 ", lst:%" PRId64
- ", lterm:%" PRId64 ", stime:%" PRId64,
+ "send sync-snapshot-rsp to dnode:%d, %s, acked:%d, term:%" PRId64 ", begin-index:%" PRId64
+ ", last-index:%" PRId64 ", last-term:%" PRId64 ", start-time:%" PRId64,
DID(&pMsg->destId), s, pMsg->ack, pMsg->term, pMsg->snapBeginIndex, pMsg->lastIndex, pMsg->lastTerm,
pMsg->startTime);
}
void syncLogRecvSyncSnapshotRsp(SSyncNode* pSyncNode, const SyncSnapshotRsp* pMsg, const char* s) {
sNDebug(pSyncNode,
- "recv sync-snapshot-rsp from dnode:%d, %s, ack:%d, term:%" PRId64 ", begin:%" PRId64 ", lst:%" PRId64
- ", lterm:%" PRId64 ", stime:%" PRId64,
+ "recv sync-snapshot-rsp from dnode:%d, %s, ack:%d, term:%" PRId64 ", begin-index:%" PRId64
+ ", last-index:%" PRId64 ", last-term:%" PRId64 ", start-time:%" PRId64,
DID(&pMsg->srcId), s, pMsg->ack, pMsg->term, pMsg->snapBeginIndex, pMsg->lastIndex, pMsg->lastTerm,
pMsg->startTime);
}
void syncLogRecvAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMsg, const char* s) {
sNTrace(pSyncNode,
- "recv sync-append-entries from dnode:%d {term:%" PRId64 ", pre-index:%" PRId64 ", pre-term:%" PRId64
- ", cmt:%" PRId64 ", pterm:%" PRId64 ", datalen:%d}, %s",
- DID(&pMsg->srcId), pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, pMsg->privateTerm,
- pMsg->dataLen, s);
+ "recv sync-append-entries from dnode:%d {term:%" PRId64 ", prev-log:{index:%" PRId64 ", term:%" PRId64
+ "}, commit-index:%" PRId64 ", datalen:%d}, %s",
+ DID(&pMsg->srcId), pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, pMsg->dataLen, s);
}
void syncLogSendAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMsg, const char* s) {
sNTrace(pSyncNode,
- "send sync-append-entries to dnode:%d, {term:%" PRId64 ", pre-index:%" PRId64 ", pre-term:%" PRId64
- ", lsend-index:%" PRId64 ", cmt:%" PRId64 ", datalen:%d}, %s",
+ "send sync-append-entries to dnode:%d, {term:%" PRId64 ", prev-log:{index:%" PRId64 ", term:%" PRId64
+ "}, index:%" PRId64 ", commit-index:%" PRId64 ", datalen:%d}, %s",
DID(&pMsg->destId), pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, (pMsg->prevLogIndex + 1),
pMsg->commitIndex, pMsg->dataLen, s);
}
-void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, int32_t voteGranted, const char* s) {
- if (voteGranted == -1) {
- sNInfo(pSyncNode,
- "recv sync-request-vote from dnode:%d, {term:%" PRId64 ", lindex:%" PRId64 ", lterm:%" PRId64 "}, %s",
- DID(&pMsg->srcId), pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, s);
- } else {
- sNInfo(pSyncNode,
- "recv sync-request-vote from dnode:%d, {term:%" PRId64 ", lindex:%" PRId64 ", lterm:%" PRId64
- "}, granted:%d",
- DID(&pMsg->srcId), pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, voteGranted);
- }
+void syncLogRecvRequestVote(SSyncNode* pSyncNode, const SyncRequestVote* pMsg, int32_t voteGranted,
+ const char* errmsg) {
+ char statusMsg[64];
+ snprintf(statusMsg, sizeof(statusMsg), "granted:%d", voteGranted);
+ sNInfo(pSyncNode,
+ "recv sync-request-vote from dnode:%d, {term:%" PRId64 ", last-index:%" PRId64 ", last-term:%" PRId64 "}, %s",
+ DID(&pMsg->srcId), pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm,
+ (voteGranted != -1) ? statusMsg : errmsg);
}
void syncLogSendRequestVote(SSyncNode* pNode, const SyncRequestVote* pMsg, const char* s) {
- sNInfo(pNode, "send sync-request-vote to dnode:%d {term:%" PRId64 ", lindex:%" PRId64 ", lterm:%" PRId64 "}, %s",
+ sNInfo(pNode,
+ "send sync-request-vote to dnode:%d {term:%" PRId64 ", last-index:%" PRId64 ", last-term:%" PRId64 "}, %s",
DID(&pMsg->destId), pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, s);
}
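In `syncUtil.c`, `syncPeerState2Str` now emits both braces itself and picks the separator inline with a single format string, so callers no longer pre-seed `peerStr` with `"{"`. One caveat when reusing the pattern: `snprintf` returns the length it *would* have written, so on truncation `len` can pass `bufLen` and the size argument must be kept non-negative. A self-contained sketch of the join with that guard (illustrative, not the sync module's code):

```c
#include <stdio.h>

/* Join integers as "{a, b, c}" into a bounded buffer. The len < cap
 * checks keep the size argument of snprintf non-negative even when
 * earlier calls truncated. */
static void joinInts(char *buf, int cap, const int *v, int n) {
  int len = snprintf(buf, cap, "{");
  for (int i = 0; i < n && len < cap; ++i) {
    len += snprintf(buf + len, cap - len, "%d%s", v[i],
                    (i < n - 1) ? ", " : "");
  }
  if (len < cap) snprintf(buf + len, cap - len, "}");
}
```

As in the function above, appending the closing brace outside the loop keeps the logged field well-formed even when iteration stops early on a missing peer.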
diff --git a/source/util/src/talgo.c b/source/util/src/talgo.c
index a06aac6afe..e373850b3c 100644
--- a/source/util/src/talgo.c
+++ b/source/util/src/talgo.c
@@ -28,14 +28,14 @@ static void median(void *src, int64_t size, int64_t s, int64_t e, const void *pa
void *buf) {
int32_t mid = ((int32_t)(e - s) >> 1u) + (int32_t)s;
- if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, s), param) == 1) {
+ if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, s), param) > 0) {
doswap(elePtrAt(src, size, mid), elePtrAt(src, size, s), size, buf);
}
- if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, e), param) == 1) {
+ if (comparFn(elePtrAt(src, size, mid), elePtrAt(src, size, e), param) > 0) {
doswap(elePtrAt(src, size, mid), elePtrAt(src, size, s), size, buf);
doswap(elePtrAt(src, size, mid), elePtrAt(src, size, e), size, buf);
- } else if (comparFn(elePtrAt(src, size, s), elePtrAt(src, size, e), param) == 1) {
+ } else if (comparFn(elePtrAt(src, size, s), elePtrAt(src, size, e), param) > 0) {
doswap(elePtrAt(src, size, s), elePtrAt(src, size, e), size, buf);
}
@@ -47,7 +47,7 @@ static void tInsertSort(void *src, int64_t size, int32_t s, int32_t e, const voi
void *buf) {
for (int32_t i = s + 1; i <= e; ++i) {
for (int32_t j = i; j > s; --j) {
- if (comparFn(elePtrAt(src, size, j), elePtrAt(src, size, j - 1), param) == -1) {
+ if (comparFn(elePtrAt(src, size, j), elePtrAt(src, size, j - 1), param) < 0) {
doswap(elePtrAt(src, size, j), elePtrAt(src, size, j - 1), size, buf);
} else {
break;
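The `talgo.c` fix replaces `== 1` with `> 0` because qsort-style comparators only promise the sign of their result, never the magnitude; comparators built on `memcmp` or `strcmp` routinely return values other than ±1, so an exact-match test silently skips swaps and breaks the ordering. A short demonstration:

```c
#include <stdio.h>
#include <string.h>

int main(void) {
  /* memcmp/strcmp promise only the sign of the result. */
  int r = memcmp("\xff", "\x01", 1); /* commonly 254, not 1 */
  printf("memcmp returned %d\n", r);
  printf("'== 1' would %s, '> 0' correctly swaps\n",
         (r == 1) ? "swap" : "skip the swap");
  return 0;
}
```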
diff --git a/tests/ci/Dockerfile b/tests/ci/Dockerfile
new file mode 100644
index 0000000000..594bcc902d
--- /dev/null
+++ b/tests/ci/Dockerfile
@@ -0,0 +1,48 @@
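+# CI image for TDengine tests: Python test tooling plus Go, JDK 8, Maven,
+# Node.js, .NET, R, Rust, and CMake toolchains, fetched through CN mirrors.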
+FROM python:3.8
+RUN pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip3 install pandas psutil fabric2 requests faker simplejson toml pexpect tzlocal distro
+RUN apt-get update
+RUN apt-get install -y psmisc sudo tree libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config build-essential valgrind \
+ vim libjemalloc-dev openssh-server screen sshpass net-tools dirmngr gnupg apt-transport-https ca-certificates software-properties-common r-base iputils-ping
+RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9
+RUN add-apt-repository 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/'
+RUN apt install -y r-base
+ADD go1.17.6.linux-amd64.tar.gz /usr/local/
+ADD jdk-8u144-linux-x64.tar.gz /usr/local/
+ADD apache-maven-3.8.4-bin.tar.gz /usr/local/
+RUN apt-get install wget -y \
+ && wget https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb \
+ && dpkg -i packages-microsoft-prod.deb \
+ && rm packages-microsoft-prod.deb \
+ && apt-get update && apt-get install -y dotnet-sdk-5.0 && apt-get install -y dotnet-sdk-6.0
+ADD node-v12.20.0-linux-x64.tar.gz /usr/local/
+RUN sh -c "rm -f /etc/localtime;ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime;echo \"Asia/Shanghai\" >/etc/timezone"
+COPY id_rsa /root/.ssh/id_rsa
+COPY .m2 /root/.m2
+COPY .nuget /root/.nuget
+COPY .dotnet /root/.dotnet
+COPY .cargo /root/.cargo
+COPY go /root/go
+ADD cmake-3.21.5-linux-x86_64.tar.gz /usr/local/
+RUN echo " export RUSTUP_DIST_SERVER=\"https://rsproxy.cn\" " >> /root/.bashrc
+RUN echo " export RUSTUP_UPDATE_ROOT=\"https://rsproxy.cn/rustup\" " >> /root/.bashrc
+RUN curl https://sh.rustup.rs -o /tmp/rustup-init.sh
+RUN sh /tmp/rustup-init.sh -y
+ENV PATH /usr/local/go/bin:/usr/local/node-v12.20.0-linux-x64/bin:/usr/local/apache-maven-3.8.4/bin:/usr/local/jdk1.8.0_144/bin:/usr/local/cmake-3.21.5-linux-x86_64/bin:/root/.cargo/bin:$PATH
+ENV JAVA_HOME /usr/local/jdk1.8.0_144
+RUN go env -w GOPROXY=https://goproxy.cn
+RUN echo "StrictHostKeyChecking no" >>/etc/ssh/ssh_config
+RUN npm config -g set unsafe-perm
+RUN npm config -g set registry https://registry.npm.taobao.org
+COPY .npm /root/.npm
+RUN R CMD javareconf JAVA_HOME=${JAVA_HOME} JAVA=${JAVA_HOME}/bin/java JAVAC=${JAVA_HOME}/bin/javac JAVAH=${JAVA_HOME}/bin/javah JAR=${JAVA_HOME}/bin/jar
+RUN echo "install.packages(\"RJDBC\", repos=\"http://cran.us.r-project.org\")"|R --no-save
+COPY .gitconfig /root/.gitconfig
+RUN mkdir -p /run/sshd
+COPY id_rsa.pub /root/.ssh/id_rsa.pub
+COPY id_rsa.pub /root/.ssh/authorized_keys
+RUN pip3 uninstall -y taostest
+COPY repository/TDinternal /home/TDinternal
+COPY repository/taos-connector-python /home/taos-connector-python
+RUN sh -c "cd /home/taos-connector-python; pip3 install ."
+COPY setup.sh /home/setup.sh
\ No newline at end of file
diff --git a/tests/ci/build_image.sh b/tests/ci/build_image.sh
new file mode 100755
index 0000000000..1864df35db
--- /dev/null
+++ b/tests/ci/build_image.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+docker build --no-cache -t taos_test:v1.0 .
+
diff --git a/tests/ci/daily_build_image.sh b/tests/ci/daily_build_image.sh
new file mode 100755
index 0000000000..01148a3aae
--- /dev/null
+++ b/tests/ci/daily_build_image.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+set -x
+
+script_dir=`dirname $0`
+cd $script_dir
+script_dir=`pwd`
+cd $script_dir/repository/taos-connector-python
+git pull
+
+cd $script_dir/repository/TDinternal
+git clean -fxd
+git pull
+
+cd $script_dir/repository/TDinternal/community
+git clean -fxd
+git checkout main
+git pull origin main
+git submodule update --init --recursive
+
+cd $script_dir
+cp $script_dir/repository/TDinternal/community/tests/ci/build_image.sh .
+cp $script_dir/repository/TDinternal/community/tests/ci/daily_build_image.sh .
+
+./build_image.sh || exit 1
+docker image prune -f
+ips="\
+192.168.1.47 \
+192.168.1.48 \
+192.168.1.49 \
+192.168.1.52 \
+192.168.0.215 \
+192.168.0.217 \
+192.168.0.219 \
+"
+
+image=taos_image.tar
+
+docker save taos_test:v1.0 -o $image
+
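+# Fan the image out in three parallel passes -- scp to every host, docker
+# load on every host, then delete the tarball -- using background jobs
+# with `wait` as the barrier between passes.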
+for ip in $ips; do
+ echo "scp $image root@$ip:/home/ &"
+ scp $image root@$ip:/home/ &
+done
+wait
+
+for ip in $ips; do
+ echo "ssh root@$ip docker load -i /home/$image &"
+ ssh root@$ip docker load -i /home/$image &
+done
+wait
+
+for ip in $ips; do
+ echo "ssh root@$ip rm -f /home/$image &"
+ ssh root@$ip rm -f /home/$image &
+done
+wait
+
+rm -f $image
+
diff --git a/tests/script/tsim/parser/interp.sim b/tests/script/tsim/parser/interp.sim
index 1b7878178c..e6512a22d7 100644
--- a/tests/script/tsim/parser/interp.sim
+++ b/tests/script/tsim/parser/interp.sim
@@ -72,4 +72,33 @@ sql_error select interp(*) from nt5931 where ts=now
sql_error select interp(*) from st5931 where ts=now
sql_error select interp(*) from ct5931 where ts=now
+sql create stable sta (ts timestamp, f1 double, f2 binary(200)) tags(t1 int);
+sql create table tba1 using sta tags(1);
+sql insert into tba1 values ('2022-04-26 15:15:01', -3.0, "a");
+sql insert into tba1 values ('2022-04-26 15:15:05', 3.0, "b");
+sql select a from (select interp(f1) as a from tba1 where ts >= '2022-04-26 15:15:01' and ts <= '2022-04-26 15:15:05' range('2022-04-26 15:15:01','2022-04-26 15:15:05') every(1s) fill(linear)) where a > 0;
+if $rows != 2 then
+ return -1
+endi
+if $data00 != 1.500000000 then
+ return -1
+endi
+if $data10 != 3.000000000 then
+ return -1
+endi
+
+sql select a from (select interp(f1+1) as a from tba1 where ts >= '2022-04-26 15:15:01' and ts <= '2022-04-26 15:15:05' range('2022-04-26 15:15:01','2022-04-26 15:15:05') every(1s) fill(linear)) where a > 0;
+if $rows != 3 then
+ return -1
+endi
+if $data00 != 1.000000000 then
+ return -1
+endi
+if $data10 != 2.500000000 then
+ return -1
+endi
+if $data20 != 4.000000000 then
+ return -1
+endi
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
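The expected values in the new interp cases follow from straight linear interpolation: f1 rises from -3.0 at 15:15:01 to 3.0 at 15:15:05 (slope 1.5 per second), the 1-second fill yields -3.0, -1.5, 0, 1.5, 3.0, and the outer `a > 0` filter keeps two rows; shifting by one with `interp(f1+1)` yields -2.0 through 4.0, of which three rows survive. A quick check of that arithmetic:

```c
#include <stdio.h>

int main(void) {
  /* Linear fill between (t=1s, v) and (t=5s, v+6) at 1-second steps,
   * mirroring the two queries above (shift = 0 and shift = 1). */
  for (int shift = 0; shift <= 1; ++shift) {
    printf("interp(f1%s): ", shift ? "+1" : "");
    for (int t = 1; t <= 5; ++t) {
      double a = -3.0 + shift + 1.5 * (t - 1); /* slope (3-(-3))/4 = 1.5 */
      if (a > 0) printf("%.9f ", a);           /* rows kept by a > 0 */
    }
    printf("\n"); /* prints 1.5 3.0, then 1.0 2.5 4.0 */
  }
  return 0;
}
```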
diff --git a/tests/script/tsim/parser/regressiontest.sim b/tests/script/tsim/parser/regressiontest.sim
index 1b127155cb..3ce2b47b44 100644
--- a/tests/script/tsim/parser/regressiontest.sim
+++ b/tests/script/tsim/parser/regressiontest.sim
@@ -63,4 +63,38 @@ if $rows != 8198 then
return -1
endi
+print ===========================> TD-22077 && TD-21877
+sql drop database if exists $db -x step1
+sql create database $db vgroups 1;
+
+sql use $db
+sql create stable st1 (ts timestamp, c int) tags(a int);
+sql create table t1 using st1 tags(1);
+sql create table t2 using st1 tags(2);
+
+$ts = 1674977959000
+$rowNum = 200
+
+$x = 0
+while $x < $rowNum
+sql insert into t1 values ( $ts , $x )
+sql insert into t2 values ( $ts + 1000a, $x )
+$x = $x + 1
+$ts = $ts + 1000
+endw
+
+sql flush database $db
+
+sql insert into t1 values('2018-09-17 09:00:26', 26);
+sql insert into t2 values('2018-09-17 09:00:25', 25);
+
+sql insert into t2 values('2018-09-17 09:00:30', 30);
+sql flush database $db;
+
+sql delete from st1 where ts<='2018-9-17 09:00:26';
+sql select * from st1;
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stream/basic1.sim b/tests/script/tsim/stream/basic1.sim
index 7bf10df637..c61c7667f8 100644
--- a/tests/script/tsim/stream/basic1.sim
+++ b/tests/script/tsim/stream/basic1.sim
@@ -834,4 +834,57 @@ endi
print ====== test _wstart end
+print insert into ts1 values(-1648791211000,1,2,3)
+
+sql create database test7 vgroups 1;
+sql use test7;
+sql create stable st(ts timestamp, a int, b int , c int) tags(ta int,tb int,tc int);
+sql create table ts1 using st tags(1,1,1);
+sql create stream streams7 trigger at_once into streamt7 as select _wstart, count(*) from ts1 interval(10s) ;
+
+sql insert into ts1 values(1648791211000,1,2,3);
+sql_error insert into ts1 values(-1648791211000,1,2,3);
+
+$loop_count = 0
+loop18:
+
+sleep 200
+sql select * from streamt7;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 1 then
+ print =====rows=$rows
+ goto loop18
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop18
+endi
+
+sql_error insert into ts1 values(-1648791211001,1,2,3) (1648791211001,1,2,3);
+
+sql select _wstart, count(*) from ts1 interval(10s) ;
+
+print $data00 $data01
+print $data10 $data11
+
+$loop_count = 0
+loop19:
+
+sleep 200
+sql select * from streamt7;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 1 then
+ print =====rows=$rows
+ goto loop19
+endi
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stream/triggerInterval0.sim b/tests/script/tsim/stream/triggerInterval0.sim
index 7353f026bb..b522dcf035 100644
--- a/tests/script/tsim/stream/triggerInterval0.sim
+++ b/tests/script/tsim/stream/triggerInterval0.sim
@@ -29,69 +29,119 @@ sql insert into t1 values(1648791223001,2,2,3,1.1);
sql insert into t1 values(1648791223002,2,2,3,1.1);
sql insert into t1 values(1648791223003,2,2,3,1.1);
sql insert into t1 values(1648791223001,2,2,3,1.1);
+
+print step 0
+
+$loop_count = 0
+
+loop0:
sleep 300
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
sql select * from streamt;
+
if $rows != 1 then
print ======$rows
- return -1
+ goto loop0
endi
if $data01 != 1 then
print ======$data01
- return -1
+ goto loop0
endi
sql insert into t1 values(1648791233001,2,2,3,1.1);
+
+print step 1
+
+$loop_count = 0
+
+loop1:
sleep 300
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
sql select * from streamt;
if $rows != 2 then
print ======$rows
- return -1
+ goto loop1
endi
if $data01 != 1 then
print ======$data01
- return -1
+ goto loop1
endi
if $data11 != 3 then
print ======$data11
- return -1
+ goto loop1
endi
sql insert into t1 values(1648791223004,2,2,3,1.1);
sql insert into t1 values(1648791223004,2,2,3,1.1);
sql insert into t1 values(1648791223005,2,2,3,1.1);
+
+print step 2
+
+$loop_count = 0
+
+loop2:
sleep 300
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
sql select * from streamt;
if $rows != 2 then
print ======$rows
- return -1
+ goto loop2
endi
+
if $data01 != 1 then
print ======$data01
- return -1
+ goto loop2
endi
if $data11 != 5 then
print ======$data11
- return -1
+ goto loop2
endi
sql insert into t1 values(1648791233002,3,2,3,2.1);
sql insert into t1 values(1648791213002,4,2,3,3.1)
sql insert into t1 values(1648791213002,4,2,3,4.1);
+
+print step 3
+
+$loop_count = 0
+
+loop3:
sleep 300
-sql select * from streamt;
-if $rows != 2 then
- print ======$rows
- return -1
-endi
-if $data01 != 2 then
- print ======$data01
- return -1
-endi
-if $data11 != 5 then
- print ======$data11
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
return -1
endi
+sql select * from streamt;
+if $rows != 2 then
+ print ======$rows
+ goto loop3
+endi
+if $data01 != 2 then
+ print ======$data01
+ goto loop3
+endi
+if $data11 != 5 then
+ print ======$data11
+ goto loop3
+endi
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/tsim/stream/udTableAndTag2.sim b/tests/script/tsim/stream/udTableAndTag2.sim
index 5dd2e3ae2b..bacc301ad0 100644
--- a/tests/script/tsim/stream/udTableAndTag2.sim
+++ b/tests/script/tsim/stream/udTableAndTag2.sim
@@ -361,6 +361,155 @@ if $data02 != NULL then
goto loop8
endi
+
+print ===== step6
+print ===== table name
+
+sql create database result5 vgroups 1;
+
+sql create database test5 vgroups 1;
+sql use test5;
+
+
+sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+
+sql create stream streams51 trigger at_once into result5.streamt51 SUBTABLE("aaa") as select _wstart, count(*) c1 from st interval(10s);
+sql create stream streams52 trigger at_once into result5.streamt52 TAGS(cc varchar(100)) as select _wstart, count(*) c1 from st interval(10s);
+sql create stream streams53 trigger at_once into result5.streamt53 TAGS(dd varchar(100)) SUBTABLE(concat("aaa-", "1") ) as select _wstart, count(*) c1 from st interval(10s);
+
+sql insert into t1 values(1648791213000,1,2,3);
+sql insert into t2 values(1648791213000,2,2,3);
+
+$loop_count = 0
+loop9:
+
+sleep 300
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+print select table_name from information_schema.ins_tables where db_name="result5" order by 1;
+
+sql select table_name from information_schema.ins_tables where db_name="result5" order by 1;
+
+if $rows != 3 then
+ print =====rows=$rows
+ print $data00
+ print $data10
+ print $data20
+ print $data30
+ goto loop9
+endi
+
+if $data00 != aaa then
+ print =====data00=$data00
+ goto loop9
+endi
+
+if $data10 != aaa-1 then
+ print =====data10=$data10
+ goto loop9
+endi
+
+$loop_count = 0
+loop10:
+
+sleep 300
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+print sql select tag_name from information_schema.ins_tags where db_name="result5" and stable_name = "streamt52" order by 1;
+
+sql select tag_name from information_schema.ins_tags where db_name="result5" and stable_name = "streamt52" order by 1;
+
+if $rows != 1 then
+ print =====rows=$rows
+ print $data00
+ print $data10
+ goto loop10
+endi
+
+if $data00 != cc then
+ print =====data00=$data00
+ goto loop10
+endi
+
+print sql select tag_name from information_schema.ins_tags where db_name="result5" and stable_name = "streamt53" order by 1;
+
+sql select tag_name from information_schema.ins_tags where db_name="result5" and stable_name = "streamt53" order by 1;
+
+if $rows != 1 then
+ print =====rows=$rows
+ print $data00
+ print $data10
+ goto loop10
+endi
+
+if $data00 != dd then
+ print =====data00=$data00
+ goto loop10
+endi
+
+$loop_count = 0
+loop11:
+
+sleep 300
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+sql select * from result5.streamt51;
+
+if $rows != 1 then
+ print =====rows=$rows
+ print $data00 $data10
+ goto loop11
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop11
+endi
+
+sql select * from result5.streamt52;
+
+if $rows != 1 then
+ print =====rows=$rows
+ print $data00 $data10
+ goto loop11
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop11
+endi
+
+sql select * from result5.streamt53;
+
+if $rows != 1 then
+ print =====rows=$rows
+ print $data00 $data10
+ goto loop11
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop11
+endi
+
print ======over
system sh/stop_dnodes.sh