Merge remote-tracking branch 'origin/3.0' into feat/TD-27337

dapan1121 2023-12-01 08:33:04 +08:00
commit 3b4a8f6ead
137 changed files with 5999 additions and 2731 deletions

View File

@@ -355,7 +355,7 @@ pipeline {
}
parallel {
stage('check docs') {
-agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_52 || worker03 || slave215 || slave217 || slave219 || Mac_catalina "}
+agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 || Mac_catalina "}
steps {
check_docs()
}
@@ -401,7 +401,7 @@ pipeline {
}
}
stage('linux test') {
-agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_52 || worker03 || slave215 || slave217 || slave219 "}
+agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 "}
options { skipDefaultCheckout() }
when {
changeRequest()

View File

@@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.0)
-set(CMAKE_VERBOSE_MAKEFILE FALSE)
+set(CMAKE_VERBOSE_MAKEFILE TRUE)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
#set output directory
@@ -159,6 +159,7 @@ ELSE ()
CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2)
CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F)
CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI)
+CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL)
IF (COMPILER_SUPPORT_SSE42)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.2")
@@ -166,11 +167,11 @@ ELSE ()
ENDIF()
IF ("${SIMD_SUPPORT}" MATCHES "true")
IF (COMPILER_SUPPORT_FMA)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfma")
ENDIF()
IF (COMPILER_SUPPORT_AVX)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
ENDIF()
@@ -183,7 +184,13 @@ ELSE ()
IF (COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512vbmi")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mavx512vbmi")
-MESSAGE(STATUS "avx512 supported by gcc")
+MESSAGE(STATUS "avx512f/avx512bmi supported by compiler")
+ENDIF()
+IF (COMPILER_SUPPORT_AVX512VL)
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vl")
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512vl")
+MESSAGE(STATUS "avx512vl supported by compiler")
ENDIF()
ENDIF()
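For context (not part of this diff): with `-mavx512f`/`-mavx512vl`, GCC and Clang define the `__AVX512F__` and `__AVX512VL__` macros, so source files can keep 128/256-bit AVX-512 code paths behind a guard. A minimal, hypothetical C sketch of such a guard follows; the helper `abs64x4` is illustrative and not taken from the TDengine sources.

```c
/* Hypothetical sketch, not from this commit: code gated by the macros that
 * -mavx512f/-mavx512vl make the compiler define. Build with and without the
 * flags to compare the SIMD and scalar paths. */
#include <stdint.h>
#include <stdio.h>
#if defined(__AVX512F__) && defined(__AVX512VL__)
#include <immintrin.h>
#endif

/* Absolute value of four 64-bit integers. */
static void abs64x4(const int64_t in[4], int64_t out[4]) {
#if defined(__AVX512F__) && defined(__AVX512VL__)
  /* AVX512VL allows 256-bit forms of AVX-512 instructions such as VPABSQ. */
  __m256i v = _mm256_loadu_si256((const __m256i *)in);
  _mm256_storeu_si256((__m256i *)out, _mm256_abs_epi64(v));
#else
  for (int i = 0; i < 4; ++i) out[i] = in[i] < 0 ? -in[i] : in[i];
#endif
}

int main(void) {
  int64_t in[4] = {-1, 2, -3, 4}, out[4];
  abs64x4(in, out);
  printf("%lld %lld %lld %lld\n", (long long)out[0], (long long)out[1],
         (long long)out[2], (long long)out[3]);
  return 0;
}
```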

View File

@@ -151,6 +151,7 @@ IF(${BUILD_S3})
IF(${BUILD_WITH_S3})
+add_definitions(-DUSE_S3)
option(BUILD_WITH_COS "If build with cos" OFF)
ELSE ()

View File

@@ -243,7 +243,7 @@ You can use the TDengine CLI to monitor your TDengine deployment and execute ad
taos
```
-The TDengine CLI displays a welcome message and version information to indicate that its connection to the TDengine service was successful. If an error message is displayed, see the [FAQ](/train-faq/faq) for troubleshooting information. At the following prompt, you can execute SQL commands.
+The TDengine CLI displays a welcome message and version information to indicate that its connection to the TDengine service was successful. If an error message is displayed, see the [FAQ](../../train-faq/faq) for troubleshooting information. At the following prompt, you can execute SQL commands.
```cmd
taos>

View File

@@ -10,7 +10,7 @@ Between official releases, beta versions may be released that contain new featur
<PkgList type={0}/>
-For information about installing TDengine, see [Install and Uninstall](/operation/pkg-install).
+For information about installing TDengine, see [Install and Uninstall](../../operation/pkg-install).
For information about TDengine releases, see [All Downloads](https://tdengine.com/all-downloads)

View File

@@ -12,7 +12,7 @@ import StackOverflowSVG from './stackoverflow.svg'
You can install and run TDengine on Linux/Windows/macOS machines as well as Docker containers. You can also deploy TDengine as a managed service with TDengine Cloud.
-The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter).
+The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](../reference/rest-api) through [taosAdapter](../reference/taosadapter).
```mdx-code-block
import DocCardList from '@theme/DocCardList';

View File

@@ -12,4 +12,4 @@ When using REST connection, the feature of bulk pulling can be enabled if the si
{{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
```
-More configuration about connection, please refer to [Java Connector](/reference/connector/java)
+More configuration about connection, please refer to [Java Connector](../../reference/connector/java)

View File

@@ -22,7 +22,7 @@ import VerifyLinux from "../../14-reference/03-connector/_verify_linux.mdx";
import VerifyWindows from "../../14-reference/03-connector/_verify_windows.mdx";
import VerifyMacOS from "../../14-reference/03-connector/_verify_macos.mdx";
-Any application running on any platform can access TDengine through the REST API provided by TDengine. For information, see [REST API](/reference/rest-api/). Applications can also use the connectors for various programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. These connectors support connecting to TDengine clusters using both native interfaces (taosc). Some connectors also support connecting over a REST interface. Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
+Any application running on any platform can access TDengine through the REST API provided by TDengine. For information, see [REST API](../../reference/rest-api/). Applications can also use the connectors for various programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. These connectors support connecting to TDengine clusters using both native interfaces (taosc). Some connectors also support connecting over a REST interface. Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
## Establish Connection
@@ -36,7 +36,7 @@ For REST and native connections, connectors provide similar APIs for performing
Key differences:
3. The REST connection is more accessible with cross-platform support, however it results in a 30% performance downgrade.
-1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc.
+1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](../../reference/connector/cpp#parameter-binding-api), [Subscription](../../reference/connector/cpp#subscription-and-consumption-api), etc.
## Install Client Driver taosc

View File

@@ -3,9 +3,9 @@ title: Data Model
description: This document describes the data model of TDengine.
---
-The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](/concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
+The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](../../concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
-Note: before you read this chapter, please make sure you have already read through [Key Concepts](/concept/), since TDengine introduces new concepts like "one table for one [data collection point](/concept/#data-collection-point)" and "[super table](/concept/#super-table-stable)".
+Note: before you read this chapter, please make sure you have already read through [Key Concepts](../../concept/), since TDengine introduces new concepts like "one table for one [data collection point](../../concept/#data-collection-point)" and "[super table](../../concept/#super-table-stable)".
## Create Database
@@ -22,7 +22,7 @@ In the above SQL statement:
- a new data file will be created every 10 days
- the size of the write cache pool on each VNode is 16 MB
- the number of vgroups is 100
-- WAL is enabled but fsync is disabled For more details please refer to [Database](/taos-sql/database).
+- WAL is enabled but fsync is disabled For more details please refer to [Database](../../taos-sql/database).
After creating a database, the current database in use can be switched using SQL command `USE`. For example the SQL statement below switches the current database to `power`.
@@ -41,13 +41,13 @@ Without the current database specified, table name must be preceded with the cor
## Create STable
-In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](/concept/#model_table1), the SQL statement below can be used to create the super table.
+In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](../../concept/#model_table1), the SQL statement below can be used to create the super table.
```sql
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
```
-Similar to creating a regular table, when creating a STable, the name and schema need to be provided. In the STable schema, the first column must always be a timestamp (like ts in the example), and the other columns (like current, voltage and phase in the example) are the data collected. The remaining columns can [contain data of type](/taos-sql/data-type/) integer, float, double, string etc. In addition, the schema for tags, like location and groupId in the example, must be provided. The tag type can be integer, float, string, etc. Tags are essentially the static properties of a data collection point. For example, properties like the location, device type, device group ID, manager ID are tags. Tags in the schema can be added, removed or updated. Please refer to [STable](/taos-sql/stable) for more details.
+Similar to creating a regular table, when creating a STable, the name and schema need to be provided. In the STable schema, the first column must always be a timestamp (like ts in the example), and the other columns (like current, voltage and phase in the example) are the data collected. The remaining columns can [contain data of type](../../taos-sql/data-type/) integer, float, double, string etc. In addition, the schema for tags, like location and groupId in the example, must be provided. The tag type can be integer, float, string, etc. Tags are essentially the static properties of a data collection point. For example, properties like the location, device type, device group ID, manager ID are tags. Tags in the schema can be added, removed or updated. Please refer to [STable](../../taos-sql/stable) for more details.
For each kind of data collection point, a corresponding STable must be created. There may be many STables in an application. For electrical power system, we need to create a STable respectively for meters, transformers, busbars, switches. There may be multiple kinds of data collection points on a single device, for example there may be one data collection point for electrical data like current and voltage and another data collection point for environmental data like temperature, humidity and wind direction. Multiple STables are required for these kinds of devices.
@@ -61,7 +61,7 @@ A specific table needs to be created for each data collection point. Similar to
CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
```
-In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "California.SanFrancisco" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](/taos-sql/table) for details.
+In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "California.SanFrancisco" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](../../taos-sql/table) for details.
It's suggested to use the globally unique ID of a data collection point as the table name. For example the device serial number could be used as a unique ID. If a unique ID doesn't exist, multiple IDs that are not globally unique can be combined to form a globally unique ID. It's not recommended to use a globally unique ID as tag value.
@@ -75,7 +75,7 @@ INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (now,
In the above SQL statement, a row with value `(now, 10.2, 219, 0.32)` will be inserted into table "d1001". If table "d1001" doesn't exist, it will be created automatically using STable "meters" as template with tag value `"California.SanFrancisco", 2`.
-For more details please refer to [Create Table Automatically](/taos-sql/insert#automatically-create-table-when-inserting).
+For more details please refer to [Create Table Automatically](../../taos-sql/insert#automatically-create-table-when-inserting).
## Single Column vs Multiple Column

View File

@@ -33,7 +33,7 @@ The below SQL statement is used to insert one row into table "d1001".
INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31);
```
-`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](../../../taos-sql/insert).
### Insert Multiple Rows
@@ -43,7 +43,7 @@ Multiple rows can be inserted in a single SQL statement. The example below inser
INSERT INTO d1001 VALUES (ts2, 10.2, 220, 0.23) (ts2, 10.3, 218, 0.25);
```
-`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1` and `ts2` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](../../../taos-sql/insert).
### Insert into Multiple Tables
@@ -53,9 +53,9 @@ Data can be inserted into multiple tables in the same SQL statement. The example
INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31) (ts2, 12.6, 218, 0.33) d1002 VALUES (ts3, 12.3, 221, 0.31);
```
-`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1`, `ts2` and `ts3` is Unix timestamp, the timestamps which is larger than the difference between current time and KEEP in config is only allowed. For further detail, refer to [TDengine SQL insert timestamp section](../../../taos-sql/insert).
-For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
+For more details about `INSERT` please refer to [INSERT](../../../taos-sql/insert).
:::info

View File

@@ -35,7 +35,7 @@ bin/kafka-topics.sh --bootstrap-server=localhost:9092 --describe
## Insert into TDengine
-We can write data into TDengine via SQL or Schemaless. For more information, please refer to [Insert Using SQL](/develop/insert-data/sql-writing/) or [High Performance Writing](/develop/insert-data/high-volume/) or [Schemaless Writing](/reference/schemaless/).
+We can write data into TDengine via SQL or Schemaless. For more information, please refer to [Insert Using SQL](../sql-writing/) or [High Performance Writing](../high-volume/) or [Schemaless Writing](../../../reference/schemaless/).
## Examples

View File

@@ -46,7 +46,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
:::
-For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
+For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](../../../reference/schemaless/#Schemaless-Line-Protocol)
## Examples

View File

@@ -128,7 +128,7 @@ For more information, see [Aggregate by Window](../../taos-sql/distinguished).
### Query
-In the section describing [Insert](/develop/insert-data/sql-writing), a database named `power` is created and some data are inserted into STable `meters`. Below sample code demonstrates how to query the data in this STable.
+In the section describing [Insert](../insert-data/sql-writing), a database named `power` is created and some data are inserted into STable `meters`. Below sample code demonstrates how to query the data in this STable.
<Tabs defaultValue="java" groupId="lang">
<TabItem label="Java" value="java">

View File

@@ -6,7 +6,7 @@ description: This document describes how to use the various components of TDengi
Before creating an application to process time-series data with TDengine, consider the following:
1. Choose the method to connect to TDengine. TDengine offers a REST API that can be used with any programming language. It also has connectors for a variety of languages.
-2. Design the data model based on your own use cases. Consider the main [concepts](/concept/) of TDengine, including "one table per data collection point" and the supertable. Learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, you decide to create one or more databases and design a supertable schema that fit your data.
+2. Design the data model based on your own use cases. Consider the main [concepts](../concept/) of TDengine, including "one table per data collection point" and the supertable. Learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, you decide to create one or more databases and design a supertable schema that fit your data.
3. Decide how you will insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually.
4. Based on business requirements, find out what SQL query statements need to be written. You may be able to repurpose any existing SQL.
5. If you want to run real-time analysis based on time series data, including various dashboards, use the TDengine stream processing component instead of deploying complex systems such as Spark or Flink.
@@ -14,7 +14,7 @@ Before creating an application to process time-series data with TDengine, consid
7. In many use cases (such as fleet management), the application needs to obtain the latest status of each data collection point. It is recommended that you use the cache function of TDengine instead of deploying Redis separately.
8. If you find that the SQL functions of TDengine cannot meet your requirements, then you can use user-defined functions to solve the problem.
-This section is organized in the order described above. For ease of understanding, TDengine provides sample code for each supported programming language for each function. If you want to learn more about the use of SQL, please read the [SQL manual](/taos-sql/). For a more in-depth understanding of the use of each connector, please read the [Connector Reference Guide](/reference/connector/). If you also want to integrate TDengine with third-party systems, such as Grafana, please refer to the [third-party tools](/third-party/).
+This section is organized in the order described above. For ease of understanding, TDengine provides sample code for each supported programming language for each function. If you want to learn more about the use of SQL, please read the [SQL manual](../taos-sql/). For a more in-depth understanding of the use of each connector, please read the [Connector Reference Guide](../reference/connector/). If you also want to integrate TDengine with third-party systems, such as Grafana, please refer to the [third-party tools](../third-party/).
If you encounter any problems during the development process, please click ["Submit an issue"](https://github.com/taosdata/TDengine/issues/new/choose) at the bottom of each page and submit it on GitHub right away.

View File

@@ -72,7 +72,7 @@ For all the dnodes in a TDengine cluster, the below parameters must be configure
## Start Cluster
-The first dnode can be started following the instructions in [Get Started](/get-started/). Then TDengine CLI `taos` can be launched to execute command `show dnodes`, the output is as following for example:
+The first dnode can be started following the instructions in [Get Started](../../get-started/). Then TDengine CLI `taos` can be launched to execute command `show dnodes`, the output is as following for example:
```
taos> show dnodes;
@@ -90,7 +90,7 @@ From the above output, it is shown that the end point of the started dnode is "h
There are a few steps necessary to add other dnodes in the cluster.
-Second, we can start `taosd` as instructed in [Get Started](/get-started/).
+Second, we can start `taosd` as instructed in [Get Started](../../get-started/).
Then, on the first dnode i.e. h1.tdengine.com in our example, use TDengine CLI `taos` to execute the following command:

View File

@@ -53,7 +53,7 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
```
-For more information about user-defined functions, see [User-Defined Functions](/develop/udf).
+For more information about user-defined functions, see [User-Defined Functions](../../develop/udf).
## Manage UDF

View File

@@ -41,7 +41,7 @@ Launch `TDinsight.sh` with the command above and restart Grafana, then open Dash
## log database
-The data of tdinsight dashboard is stored in `log` database (default. You can change it in taoskeeper's config file. For more infrmation, please reference to [taoskeeper document](/reference/taosKeeper)). The taoskeeper will create log database on taoskeeper startup.
+The data of tdinsight dashboard is stored in `log` database (default. You can change it in taoskeeper's config file. For more infrmation, please reference to [taoskeeper document](../../reference/taosKeeper)). The taoskeeper will create log database on taoskeeper startup.
### cluster\_info table

View File

@@ -514,4 +514,4 @@ Response body:
## Reference
-[taosAdapter](/reference/taosadapter/)
+[taosAdapter](../taosadapter/)

View File

@@ -24,7 +24,7 @@ The dynamic libraries for the TDengine client driver are located in.
## Supported platforms
-Please refer to [list of supported platforms](/reference/connector#supported-platforms)
+Please refer to [list of supported platforms](../#supported-platforms)
## Supported versions
@@ -32,7 +32,7 @@ The version number of the TDengine client driver and the version number of the T
## Installation Steps
-Please refer to the [Installation Steps](/reference/connector#installation-steps) for TDengine client driver installation
+Please refer to the [Installation Steps](../#installation-steps) for TDengine client driver installation
## Establishing a connection
@@ -394,7 +394,7 @@ The specific functions related to the interface are as follows (see also the [pr
### Schemaless Writing API
-In addition to writing data using the SQL method or the parameter binding API, writing can also be done using schemaless writing, which eliminates the need to create a super table/data sub-table structure in advance and writes the data directly. The TDengine system automatically creates and maintains the required table structure based on the written data content. The use of schemaless writing is described in the chapter [Schemaless Writing](/reference/schemaless/), and the C/C++ API used with it is described here.
+In addition to writing data using the SQL method or the parameter binding API, writing can also be done using schemaless writing, which eliminates the need to create a super table/data sub-table structure in advance and writes the data directly. The TDengine system automatically creates and maintains the required table structure based on the written data content. The use of schemaless writing is described in the chapter [Schemaless Writing](../../schemaless/), and the C/C++ API used with it is described here.
- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)`
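As a rough illustration of the schemaless API quoted above (not part of this diff), a hedged C sketch might call it as follows; it assumes a TDengine server on localhost, an existing database named `test`, and the line-protocol constants exposed by `taos.h` (`TSDB_SML_LINE_PROTOCOL`, `TSDB_SML_TIMESTAMP_MILLI_SECONDS`).

```c
/* Hedged sketch, not from the connector docs: one possible call to the
 * schemaless API above using InfluxDB line protocol. Assumes a server on
 * localhost and an existing database named `test`. */
#include <stdio.h>
#include <taos.h>

int main(void) {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (taos == NULL) {
    fprintf(stderr, "failed to connect to TDengine\n");
    return 1;
  }
  /* One line in InfluxDB line protocol; the table structure is created
   * automatically from its content. */
  const char *lines[] = {
      "meters,location=California.LosAngeles,groupid=2 "
      "current=11.8,voltage=221,phase=0.28 1648432611249"};
  TAOS_RES *res = taos_schemaless_insert(taos, (char **)lines, 1,
                                         TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_MILLI_SECONDS);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(taos);
  taos_cleanup();
  return 0;
}
```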

View File

@@ -148,7 +148,7 @@ TDengine currently supports timestamp, number, character, Boolean type, and the
**Note**: Only TAG supports JSON types
Due to historical reasons, the BINARY type data in TDengine is not truly binary data and is no longer recommended for use. Please use VARBINARY type instead.
-GEOMETRY type is binary data in little endian byte order, which complies with the WKB specification. For detailed information, please refer to [Data Type] (/tao-sql/data-type/#Data Types)
+GEOMETRY type is binary data in little endian byte order, which complies with the WKB specification. For detailed information, please refer to [Data Type](../../../taos-sql/data-type/)
For WKB specifications, please refer to [Well Known Binary (WKB)] https://libgeos.org/specifications/wkb/
For Java connector, the jts library can be used to easily create GEOMETRY type objects, serialize them, and write them to TDengine. Here is an example [Geometry example]https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java
@@ -160,7 +160,7 @@ For Java connector, the jts library can be used to easily create GEOMETRY type o
Before using Java Connector to connect to the database, the following conditions are required.
- Java 1.8 or above runtime environment and Maven 3.6 or above installed
-- TDengine client driver installed (required for native connections, not required for REST connections), please refer to [Installing Client Driver](/reference/connector#Install-Client-Driver)
+- TDengine client driver installed (required for native connections, not required for REST connections), please refer to [Installing Client Driver](../#Install-Client-Driver)
### Install the connectors
@@ -368,7 +368,7 @@ The configuration parameters in properties are as follows.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
- TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection.
- TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20.
-For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only).
+For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](../../config/#Client-Only).
### Priority of configuration parameters

View File

@@ -74,7 +74,7 @@ If it is a TDengine error, you can get the error code and error information in t
### Pre-installation preparation
* Install Go development environment (Go 1.14 and above, GCC 4.8.5 and above)
-* If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps
+* If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](../#install-client-driver) for specific steps
Configure the environment variables and check the command.

View File

@@ -80,7 +80,7 @@ Note: Only TAG supports JSON types
### Pre-installation preparation
* Install the Rust development toolchain
-* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver)
+* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](../#install-client-driver)
### Install the connectors

View File

@@ -7,7 +7,7 @@ description: This document describes taospy, the TDengine Python connector.
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
+`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](../cpp) and [REST interface](../../rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
`taos-ws-py` is an optional package to enable using WebSocket to connect TDengine.
@@ -17,7 +17,7 @@ The direct connection to the server using the native interface provided by the c
The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).
## Supported platforms
-- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
+- The [supported platforms](../#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
- REST connections are supported on all platforms that can run Python.
### Supported features
@@ -95,7 +95,7 @@ TDengine currently supports timestamp, number, character, Boolean type, and the
1. Install Python. The recent taospy package requires Python 3.6.2+. The earlier versions of taospy require Python 3.7+. The taos-ws-py package requires Python 3.7+. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
-If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
+If you use a native connection, you will also need to [Install Client Driver](../#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
### Install via pip
@@ -444,7 +444,7 @@ The best practice for TaosCursor is to create a cursor at the beginning of a que
##### Use of the RestClient class
-The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
+The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
```python title="Use of RestClient"
{{#include docs/examples/python/rest_client_example.py}}
@@ -501,7 +501,7 @@ The queried results can only be fetched once. For example, only one of `fetch_al
<TabItem value="rest" label="REST connection">
-The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
+The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
```python
{{#include docs/examples/python/rest_client_example.py}}
@@ -561,7 +561,7 @@ The `TaosConnection` class and the `TaosResult` class already implement all the
##### Use of the RestClient class
-The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
+The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
```python title="Use of RestClient"
{{#include docs/examples/python/rest_client_with_req_id_example.py}}

View File

@@ -28,7 +28,7 @@ The REST connector supports all platforms that can run Node.js.
## Version support
-Please refer to [version support list](/reference/connector#version-support)
+Please refer to [version support list](../#version-support)
## Supported features
@@ -58,7 +58,7 @@ Please refer to [version support list](/reference/connector#version-support)
### Pre-installation preparation
- Install the Node.js development environment
-- If you are using the REST connector, skip this step. However, if you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector#Install-Client-Driver) for more details. We use [node-gyp](https://github.com/nodejs/node-gyp) to interact with TDengine instances and also need to install some dependencies mentioned below depending on the specific OS.
+- If you are using the REST connector, skip this step. However, if you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](../#Install-Client-Driver) for more details. We use [node-gyp](https://github.com/nodejs/node-gyp) to interact with TDengine instances and also need to install some dependencies mentioned below depending on the specific OS.
<Tabs defaultValue="Linux">
<TabItem value="Linux" label="Linux system installation dependencies">

View File

@@ -36,7 +36,7 @@ Please note TDengine does not support 32bit Windows any more.
## Version support
-Please refer to [version support list](/reference/connector#version-support)
+Please refer to [version support list](../#version-support)
## Supported features
@@ -69,7 +69,7 @@ Please refer to [version support list](/reference/connector#version-support)
* Install the [.NET SDK](https://dotnet.microsoft.com/download)
* [Nuget Client](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (optional installation)
-* Install TDengine client driver, please refer to [Install client driver](/reference/connector/#install-client-driver) for details
+* Install TDengine client driver, please refer to [Install client driver](../#install-client-driver) for details
### Install `TDengine.Connector`

View File

@@ -40,7 +40,7 @@ Because the version of TDengine client driver is tightly associated with that of
### Install TDengine Client Driver
-Regarding how to install TDengine client driver please refer to [Install Client Driver](/reference/connector#installation-steps)
+Regarding how to install TDengine client driver please refer to [Install Client Driver](../#installation-steps)
### Install php-tdengine

View File

@@ -2,7 +2,7 @@
:::info
-Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding Windows client, please refer to [Install TDengine](../../get-started/package).
+Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](../../get-started/). For Windows development, you need to install the corresponding Windows client, please refer to [Install TDengine](../../get-started/package).
- libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately.
- taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately.

View File

@@ -186,7 +186,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl
### TDengine RESTful interface
-You can use any client that supports the http protocol to write data to or query data from TDengine by accessing the REST interface address `http://<fqdn>:6041/rest/sql`. See the [official documentation](/reference/rest-api/) for details.
+You can use any client that supports the http protocol to write data to or query data from TDengine by accessing the REST interface address `http://<fqdn>:6041/rest/sql`. See the [official documentation](../rest-api/) for details.
### InfluxDB
@@ -202,7 +202,7 @@ Support InfluxDB query parameters as follows.
- `precision` The time precision used by TDengine
- `u` TDengine user name
- `p` TDengine password
-- `ttl` The time to live of automatically created sub-table. This value cannot be updated. TDengine will use the ttl value of the first data of sub-table to create sub-table. For more information, please refer [Create Table](/taos-sql/table/#create-table)
+- `ttl` The time to live of automatically created sub-table. This value cannot be updated. TDengine will use the ttl value of the first data of sub-table to create sub-table. For more information, please refer [Create Table](../../taos-sql/table/#create-table)
Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
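As a rough, hypothetical sketch of the `/rest/sql` endpoint mentioned above (not part of this change), a minimal C program using libcurl could POST a SQL statement as follows; the host, credentials, and statement are placeholders.

```c
/* Hedged sketch, not part of this change: POST a SQL statement to the
 * /rest/sql endpoint with libcurl. Host, credentials, and the SQL text
 * below are placeholders. */
#include <stdio.h>
#include <curl/curl.h>

int main(void) {
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURL *curl = curl_easy_init();
  if (curl == NULL) return 1;

  curl_easy_setopt(curl, CURLOPT_URL, "http://127.0.0.1:6041/rest/sql");
  curl_easy_setopt(curl, CURLOPT_USERPWD, "root:taosdata");     /* Basic auth */
  curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "show databases"); /* request body is the SQL */

  CURLcode rc = curl_easy_perform(curl); /* response JSON is written to stdout */
  if (rc != CURLE_OK) {
    fprintf(stderr, "request failed: %s\n", curl_easy_strerror(rc));
  }

  curl_easy_cleanup(curl);
  curl_global_cleanup();
  return 0;
}
```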

View File

@@ -31,7 +31,7 @@ There are two ways to install taosdump:
2. backup multiple specified databases: use `-D db1,db2,... ` parameters;
3. back up some super or normal tables in the specified database: use `dbname stbname1 stbname2 tbname1 tbname2 ... ` parameters. Note that the first parameter of this input sequence is the database name, and only one database is supported. The second and subsequent parameters are the names of super or normal tables in that database, separated by spaces.
4. back up the system log database: TDengine clusters usually contain a system database named `log`. The data in this database is the data that TDengine runs itself, and the taosdump will not back up the log database by default. If users need to back up the log database, users can use the `-a` or `-allow-sys` command-line parameter.
-5. Loose mode backup: taosdump version 1.4.1 onwards provides `-n` and `-L` parameters for backing up data without using escape characters and "loose" mode, which can reduce the number of backups if table names, column names, tag names do not use escape characters. This can also reduce the backup data time and backup data footprint. If you are unsure about using `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters.
+5. Loose mode backup: taosdump version 1.4.1 onwards provides `-n` and `-L` parameters for backing up data without using escape characters and "loose" mode, which can reduce the number of backups if table names, column names, tag names do not use escape characters. This can also reduce the backup data time and backup data footprint. If you are unsure about using `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](../../taos-sql/escape) for a description of escaped characters.
:::tip
- taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s` then only taosdump will parse schema.

View File

@@ -8,7 +8,7 @@ The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is
## Installation
-If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [Connector](/reference/connector/).
+If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [Connector](../connector/).
## Execution
@@ -18,7 +18,7 @@ To access the TDengine CLI, you can execute `taos` command-line utility from a t
taos
```
-TDengine CLI will display a welcome message and version information if it successfully connected to the TDengine service. If it fails, TDengine CLI will print an error message. See [FAQ](/train-faq/faq) to solve the problem of terminal connection failure to the server. The TDengine CLI prompts as follows:
+TDengine CLI will display a welcome message and version information if it successfully connected to the TDengine service. If it fails, TDengine CLI will print an error message. See [FAQ](../../train-faq/faq) to solve the problem of terminal connection failure to the server. The TDengine CLI prompts as follows:
```cmd
taos>

View File

@ -87,7 +87,7 @@ Ensure that your firewall rules do not block TCP port 6042 on any host in the c
| Protocol | Default Port | Description | How to configure | | Protocol | Default Port | Description | How to configure |
| :------- | :----------- | :-------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------- | | :------- | :----------- | :-------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------- |
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. | serverPort | | TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. | serverPort |
| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) | | TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](../taosadapter/) |
| TCP | 6043 | Service Port of taosKeeper | The parameter of taosKeeper | | TCP | 6043 | Service Port of taosKeeper | The parameter of taosKeeper |
| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters. | | TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters. |
| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters. | | UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters. |
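As a quick way to verify that the REST port is reachable, the snippet below posts a SQL statement to taosAdapter's `/rest/sql` endpoint. This is a minimal sketch using libcurl; the host `localhost`, port 6041, and the default `root`/`taosdata` credentials are assumptions about a default installation, not part of this change.

```c
#include <curl/curl.h>
#include <stdio.h>

/* Minimal REST connectivity check against taosAdapter on the default port 6041 (sketch only). */
int main(void) {
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURL *curl = curl_easy_init();
  if (curl == NULL) return 1;

  curl_easy_setopt(curl, CURLOPT_URL, "http://localhost:6041/rest/sql");
  curl_easy_setopt(curl, CURLOPT_USERPWD, "root:taosdata");  /* default credentials, replace as needed */
  curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "show databases;");

  CURLcode rc = curl_easy_perform(curl);  /* the JSON response is written to stdout */
  if (rc != CURLE_OK) {
    fprintf(stderr, "request failed: %s\n", curl_easy_strerror(rc));
  }

  curl_easy_cleanup(curl);
  curl_global_cleanup();
  return rc == CURLE_OK ? 0 : 1;
}
```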

View File

@ -116,7 +116,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
10. The smlTsDefaultName configuration (a string value) has been added to taos.cfg and only takes effect on the client side. When set, it specifies the name of the time column created by schemaless automatic table creation; if not configured, the default is _ts. 10. The smlTsDefaultName configuration (a string value) has been added to taos.cfg and only takes effect on the client side. When set, it specifies the name of the time column created by schemaless automatic table creation; if not configured, the default is _ts.
11. Super table names and child table names are case sensitive. 11. Super table names and child table names are case sensitive.
:::tip :::tip
All schemaless processing logic still follows TDengine's underlying restrictions on data structures; for example, the total length of each row of data cannot exceed 48 KB (64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for the specific constraints in this area; a minimal schemaless insert is sketched after this note. All schemaless processing logic still follows TDengine's underlying restrictions on data structures; for example, the total length of each row of data cannot exceed 48 KB (64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](../../taos-sql/limit) for the specific constraints in this area; a minimal schemaless insert is sketched after this note.
::: :::
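The sketch below shows a minimal schemaless insert through the C connector, subject to the limits described above. The connection parameters and the database name `test` are placeholders for a default local deployment and are not part of this change.

```c
#include <stdio.h>
#include <taos.h>

int main(void) {
  // Placeholder connection parameters; assumes the database "test" already exists.
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (taos == NULL) return 1;

  // One InfluxDB line-protocol record. The super table "st", its tags and columns
  // are created automatically; the time column is named _ts unless smlTsDefaultName
  // is configured in taos.cfg.
  char *lines[] = {"st,t1=3i64,t2=4f64 c1=3i64,c2=false,c3=\"passit\" 1626006833639000000"};

  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_NANO_SECONDS);
  if (taos_errno(res) != 0) {
    printf("schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(taos);
  return 0;
}
```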
## Time resolution recognition ## Time resolution recognition

View File

@ -16,7 +16,7 @@ Prometheus data can be stored in TDengine via the `remote_write` interface with
To write Prometheus data to TDengine requires the following preparations. To write Prometheus data to TDengine requires the following preparations.
- The TDengine cluster is deployed and functioning properly - The TDengine cluster is deployed and functioning properly
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details. - taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- Prometheus has been installed. Please refer to the [official documentation](https://prometheus.io/docs/prometheus/latest/installation/) for installing Prometheus - Prometheus has been installed. Please refer to the [official documentation](https://prometheus.io/docs/prometheus/latest/installation/) for installing Prometheus
## Configuration steps ## Configuration steps

View File

@ -14,7 +14,7 @@ Telegraf's data can be written to TDengine by simply adding the output configura
To write Telegraf data to TDengine requires the following preparations. To write Telegraf data to TDengine requires the following preparations.
- The TDengine cluster is deployed and functioning properly - The TDengine cluster is deployed and functioning properly
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details. - taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- Telegraf has been installed. Please refer to the [official documentation](https://docs.influxdata.com/telegraf/v1.22/install/) for Telegraf installation. - Telegraf has been installed. Please refer to the [official documentation](https://docs.influxdata.com/telegraf/v1.22/install/) for Telegraf installation.
- Telegraf collects the running status measurements of current system. You can enable [input plugins](https://docs.influxdata.com/telegraf/v1.22/plugins/) to insert [other formats](https://docs.influxdata.com/telegraf/v1.24/data_formats/input/) data to Telegraf then forward to TDengine. - Telegraf collects the running status measurements of current system. You can enable [input plugins](https://docs.influxdata.com/telegraf/v1.22/plugins/) to insert [other formats](https://docs.influxdata.com/telegraf/v1.24/data_formats/input/) data to Telegraf then forward to TDengine.
@ -73,6 +73,6 @@ Query OK, 3 row(s) in set (0.013269s)
- TDengine takes InfluxDB-format data and creates unique IDs for table names according to its naming rule. - TDengine takes InfluxDB-format data and creates unique IDs for table names according to its naming rule.
Users can configure the `smlChildTableName` parameter to generate specific table names if needed, and must then insert data in the corresponding format. Users can configure the `smlChildTableName` parameter to generate specific table names if needed, and must then insert data in the corresponding format.
For example, add `smlChildTableName=tname` to the taos.cfg file and insert the data `st,tname=cpu1,t1=4 c1=3 1626006833639000000`; the table name will then be cpu1. If multiple lines have the same tname but different tag_set values, the tag_set of the first line is used to automatically create the table and the tag_set values of the other lines are ignored. Please refer to [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol) For example, add `smlChildTableName=tname` to the taos.cfg file and insert the data `st,tname=cpu1,t1=4 c1=3 1626006833639000000`; the table name will then be cpu1. If multiple lines have the same tname but different tag_set values, the tag_set of the first line is used to automatically create the table and the tag_set values of the other lines are ignored. Please refer to [TDengine Schemaless](../../reference/schemaless/#Schemaless-Line-Protocol)
::: :::

View File

@ -15,7 +15,7 @@ You can write the data collected by collectd to TDengine by simply modifying the
Writing collectd data to the TDengine requires several preparations. Writing collectd data to the TDengine requires several preparations.
- The TDengine cluster is deployed and running properly - The TDengine cluster is deployed and running properly
- taosAdapter is installed and running, please refer to [taosAdapter's manual](/reference/taosadapter) for details - taosAdapter is installed and running, please refer to [taosAdapter's manual](../../reference/taosadapter) for details
- collectd has been installed. Please refer to the [official documentation](https://collectd.org/download.shtml) to install collectd - collectd has been installed. Please refer to the [official documentation](https://collectd.org/download.shtml) to install collectd
## Configuration steps ## Configuration steps

View File

@ -14,7 +14,7 @@ You can write the data collected by icinga2 to TDengine by simply modifying the
To write icinga2 data to TDengine requires the following preparations. To write icinga2 data to TDengine requires the following preparations.
- The TDengine cluster is deployed and working properly - The TDengine cluster is deployed and working properly
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details. - taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- icinga2 has been installed. Please refer to the [official documentation](https://icinga.com/docs/icinga-2/latest/doc/02-installation/) for icinga2 installation - icinga2 has been installed. Please refer to the [official documentation](https://icinga.com/docs/icinga-2/latest/doc/02-installation/) for icinga2 installation
## Configuration steps ## Configuration steps

View File

@ -14,7 +14,7 @@ You can write the data collected by TCollector to TDengine by simply changing th
To write data to the TDengine via TCollector requires the following preparations. To write data to the TDengine via TCollector requires the following preparations.
- The TDengine cluster has been deployed and is working properly - The TDengine cluster has been deployed and is working properly
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details. - taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- TCollector has been installed. Please refer to [official documentation](http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html#installation-of-tcollector) for TCollector installation - TCollector has been installed. Please refer to [official documentation](http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html#installation-of-tcollector) for TCollector installation
## Configuration steps ## Configuration steps

View File

@ -82,7 +82,7 @@ Edit the resource configuration to add the key/value pairing for Authorization.
Basic cm9vdDp0YW9zZGF0YQ== Basic cm9vdDp0YW9zZGF0YQ==
``` ```
Please refer to the [TDengine REST API documentation](/reference/rest-api/) for details on authorization. Please refer to the [TDengine REST API documentation](../../reference/rest-api/) for details on authorization.
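The value after `Basic` is simply the Base64 encoding of `<user>:<password>`, so the default `root:taosdata` yields `cm9vdDp0YW9zZGF0YQ==`. The sketch below reproduces that encoding for illustration; it assumes the default credentials and is not part of this change.

```c
#include <stdio.h>
#include <string.h>

/* Encode "user:password" with Base64 to build the HTTP Basic auth header value. */
static void base64(const unsigned char *in, size_t len, char *out) {
  static const char tbl[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
  size_t o = 0;
  for (size_t i = 0; i < len; i += 3) {
    unsigned v = (unsigned)in[i] << 16;
    if (i + 1 < len) v |= (unsigned)in[i + 1] << 8;
    if (i + 2 < len) v |= in[i + 2];
    out[o++] = tbl[(v >> 18) & 63];
    out[o++] = tbl[(v >> 12) & 63];
    out[o++] = (i + 1 < len) ? tbl[(v >> 6) & 63] : '=';
    out[o++] = (i + 2 < len) ? tbl[v & 63] : '=';
  }
  out[o] = '\0';
}

int main(void) {
  const char *cred = "root:taosdata";  /* default user:password, replace with your own */
  char value[64] = {0};
  base64((const unsigned char *)cred, strlen(cred), value);
  printf("Authorization: Basic %s\n", value);  /* prints: Basic cm9vdDp0YW9zZGF0YQ== */
  return 0;
}
```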
Enter the rule engine replacement template in the message body: Enter the rule engine replacement template in the message body:

View File

@ -94,7 +94,7 @@ The output as bellow:
The role of the TDengine Sink Connector is to synchronize the data of the specified topic to TDengine. Users do not need to create databases and super tables in advance. The name of the target database can be specified manually (see the configuration parameter connection.database), or it can be generated according to specific rules (see the configuration parameter connection.database.prefix). The role of the TDengine Sink Connector is to synchronize the data of the specified topic to TDengine. Users do not need to create databases and super tables in advance. The name of the target database can be specified manually (see the configuration parameter connection.database), or it can be generated according to specific rules (see the configuration parameter connection.database.prefix).
The TDengine Sink Connector internally uses the TDengine [schemaless write interface](/reference/connector/cpp#modeless write-api) to write data to TDengine and currently supports data in three formats: [InfluxDB line protocol format](/develop /insert-data/influxdb-line), [OpenTSDB Telnet protocol format](/develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json). The TDengine Sink Connector internally uses the TDengine [schemaless write interface](../../reference/connector/cpp#modeless write-api) to write data to TDengine and currently supports data in three formats: [InfluxDB line protocol format](../../develop/insert-data/influxdb-line), [OpenTSDB Telnet protocol format](../../develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol format](../../develop/insert-data/opentsdb-json).
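For orientation, the three formats map to the C connector's schemaless protocol constants roughly as shown below; the sample records are invented and only illustrate the shape of each format.

```c
/* Illustrative records for the three ingestion formats (values are made up). */

/* InfluxDB line protocol   -> TSDB_SML_LINE_PROTOCOL */
const char *influx_line =
    "meters,location=California.SanFrancisco,groupid=2 current=10.3,voltage=219 1648432611249000000";

/* OpenTSDB Telnet protocol -> TSDB_SML_TELNET_PROTOCOL */
const char *telnet_line =
    "meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2";

/* OpenTSDB JSON protocol   -> TSDB_SML_JSON_PROTOCOL */
const char *json_line =
    "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3,"
    " \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}]";
```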
The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format. The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.
@ -213,7 +213,7 @@ If you see the above data, the synchronization is successful. If not, check the
The role of the TDengine Source Connector is to push all data of a specific TDengine database after a particular point in time to Kafka. The TDengine Source Connector first pulls historical data in batches and then synchronizes incremental data using a regular-query strategy. At the same time, changes to tables are monitored, and newly added tables are synchronized automatically. If Kafka Connect is restarted, synchronization resumes where it left off. The role of the TDengine Source Connector is to push all data of a specific TDengine database after a particular point in time to Kafka. The TDengine Source Connector first pulls historical data in batches and then synchronizes incremental data using a regular-query strategy. At the same time, changes to tables are monitored, and newly added tables are synchronized automatically. If Kafka Connect is restarted, synchronization resumes where it left off.
The TDengine Source Connector converts the data in TDengine tables into [InfluxDB Line protocol format](/develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json ) and then writes it to Kafka. The TDengine Source Connector converts the data in TDengine tables into [InfluxDB Line protocol format](../../develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](../../develop/insert-data/opentsdb-json ) and then writes it to Kafka.
The following sample program synchronizes the data in the database test to the topic tdengine-test-meters. The following sample program synchronizes the data in the database test to the topic tdengine-test-meters.

View File

@ -14,4 +14,4 @@ Check the running status of taosAdapter.
systemctl status taosadapter systemctl status taosadapter
``` ```
Please refer to the `taosadapter --help` command output and the [reference documentation](/reference/taosadapter) for detailed configuration parameters and usage of taosAdapter. Please refer to the `taosadapter --help` command output and the [reference documentation](../../reference/taosadapter) for detailed configuration parameters and usage of taosAdapter.

View File

@ -41,7 +41,7 @@ Download and install the [latest version of TDengine](https://docs.tdengine.com/
### Install Grafana Plugin and Configure Data Source ### Install Grafana Plugin and Configure Data Source
Please refer to [Install Grafana Plugin and Configure Data Source](/third-party/grafana/#install-grafana-plugin-and-configure-data-source) Please refer to [Install Grafana Plugin and Configure Data Source](../../third-party/grafana/#install-grafana-plugin-and-configure-data-source)
### Modify /etc/telegraf/telegraf.conf ### Modify /etc/telegraf/telegraf.conf

View File

@ -44,7 +44,7 @@ Download and install the [latest version of TDengine](https://docs.tdengine.com/
### Install Grafana Plugin and Configure Data Source ### Install Grafana Plugin and Configure Data Source
Please refer to [Install Grafana Plugin and Configure Data Source](/third-party/grafana/#install-grafana-plugin-and-configure-data-source) Please refer to [Install Grafana Plugin and Configure Data Source](../../third-party/grafana/#install-grafana-plugin-and-configure-data-source)
### Configure collectd ### Configure collectd

View File

@ -70,7 +70,7 @@ You can use collectd and push the data to taosAdapter utilizing the write_tsdb p
- **Tuning the Dashboard system** - **Tuning the Dashboard system**
After writing the data to TDengine, you can configure Grafana to visualize the data written to TDengine. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](/third-party/grafana). After writing the data to TDengine, you can configure Grafana to visualize the data written to TDengine. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](../../third-party/grafana).
TDengine provides two sets of Dashboard templates by default, and users only need to import the templates from the Grafana directory into Grafana to activate their use. TDengine provides two sets of Dashboard templates by default, and users only need to import the templates from the Grafana directory into Grafana to activate their use.
@ -396,7 +396,7 @@ Hard disk writing performance has little effect on TDengine. The TDengine writin
### Computational resource estimates ### Computational resource estimates
Due to the characteristics of IoT data, when the frequency of data generation is consistent, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](/operation/), the system consumes less than 1 CPU core at 22,000 writes per second. Due to the characteristics of IoT data, when the frequency of data generation is consistent, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](../../operation/), the system consumes less than 1 CPU core at 22,000 writes per second.
To estimate the CPU resources consumed by queries, assume the application requires the database to provide 10,000 QPS and that each query consumes about 1 ms of CPU time. Each core then provides 1,000 QPS, so at least 10 cores are required to satisfy 10,000 QPS. For the system as a whole to stay below 50% CPU load, the entire cluster needs twice as many cores, i.e. 20 cores. To estimate the CPU resources consumed by queries, assume the application requires the database to provide 10,000 QPS and that each query consumes about 1 ms of CPU time. Each core then provides 1,000 QPS, so at least 10 cores are required to satisfy 10,000 QPS. For the system as a whole to stay below 50% CPU load, the entire cluster needs twice as many cores, i.e. 20 cores.
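The arithmetic above can be written out explicitly. The sketch below simply restates the estimate (1 ms of CPU per query, a 50% target load); it is an illustration, not a sizing tool.

```c
#include <stdio.h>

int main(void) {
  double target_qps       = 10000.0;  // QPS the application requires
  double cpu_ms_per_query = 1.0;      // estimated CPU time per query
  double target_load      = 0.5;      // keep overall CPU load below 50%

  double qps_per_core   = 1000.0 / cpu_ms_per_query;   // 1,000 QPS per fully used core
  double cores_at_full  = target_qps / qps_per_core;   // 10 cores at 100% load
  double cores_required = cores_at_full / target_load; // 20 cores at 50% load

  printf("cores required: %.0f\n", cores_required);
  return 0;
}
```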

View File

@ -43,6 +43,7 @@ int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size,
int32_t s3GetObjectsByPrefix(const char *prefix, const char *path); int32_t s3GetObjectsByPrefix(const char *prefix, const char *path);
void s3EvictCache(const char *path, long object_size); void s3EvictCache(const char *path, long object_size);
long s3Size(const char *object_name); long s3Size(const char *object_name);
int32_t s3GetObjectToFile(const char *object_name, char *fileName);
#ifdef __cplusplus #ifdef __cplusplus
} }
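A hypothetical call sequence for the helpers declared above is sketched below. The object and file names are invented, the prototypes are copied from this header, and the snippet assumes it is linked against the TDengine build that provides them.

```c
#include <stdbool.h>
#include <stdint.h>

// Prototypes as declared in the header above (normally pulled in by including it).
int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size, bool check, uint8_t **ppBlock);
int32_t s3GetObjectToFile(const char *object_name, char *fileName);
long    s3Size(const char *object_name);

int main(void) {
  const char *object = "vnode2/tsdb/f1.data";  // hypothetical object name
  uint8_t    *block  = NULL;

  long size = s3Size(object);  // query the remote object size
  if (size > 0 && s3GetObjectBlock(object, 0, size, true, &block) == 0) {
    /* ... consume the block; the caller is assumed to own and release it ... */
  }

  // New in this change: download a whole object straight into a local file.
  char localFile[] = "/tmp/f1.data";  // hypothetical destination path
  return s3GetObjectToFile(object, localFile);
}
```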

View File

@ -75,12 +75,15 @@ extern int32_t tsElectInterval;
extern int32_t tsHeartbeatInterval; extern int32_t tsHeartbeatInterval;
extern int32_t tsHeartbeatTimeout; extern int32_t tsHeartbeatTimeout;
// vnode
extern int64_t tsVndCommitMaxIntervalMs;
// snode // snode
extern int32_t tsRsyncPort; extern int32_t tsRsyncPort;
extern char tsCheckpointBackupDir[]; extern char tsCheckpointBackupDir[];
// vnode checkpoint // vnode checkpoint
extern char tsSnodeAddress[]; //127.0.0.1:873 extern char tsSnodeAddress[]; // 127.0.0.1:873
// mnode // mnode
extern int64_t tsMndSdbWriteDelta; extern int64_t tsMndSdbWriteDelta;
@ -104,8 +107,8 @@ extern int32_t tsMonitorMaxLogs;
extern bool tsMonitorComp; extern bool tsMonitorComp;
// audit // audit
extern bool tsEnableAudit; extern bool tsEnableAudit;
extern bool tsEnableAuditCreateTable; extern bool tsEnableAuditCreateTable;
// telem // telem
extern bool tsEnableTelem; extern bool tsEnableTelem;
@ -113,9 +116,9 @@ extern int32_t tsTelemInterval;
extern char tsTelemServer[]; extern char tsTelemServer[];
extern uint16_t tsTelemPort; extern uint16_t tsTelemPort;
extern bool tsEnableCrashReport; extern bool tsEnableCrashReport;
extern char *tsTelemUri; extern char * tsTelemUri;
extern char *tsClientCrashReportUri; extern char * tsClientCrashReportUri;
extern char *tsSvrCrashReportUri; extern char * tsSvrCrashReportUri;
// query buffer management // query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing

View File

@ -31,8 +31,6 @@ extern "C" {
#endif #endif
#define GRANT_HEART_BEAT_MIN 2 #define GRANT_HEART_BEAT_MIN 2
#define GRANT_ACTIVE_CODE "activeCode"
#define GRANT_C_ACTIVE_CODE "cActiveCode"
typedef enum { typedef enum {
TSDB_GRANT_ALL, TSDB_GRANT_ALL,
@ -52,11 +50,6 @@ typedef enum {
TSDB_GRANT_TABLE, TSDB_GRANT_TABLE,
} EGrantType; } EGrantType;
typedef struct {
int64_t grantedTime;
int64_t connGrantedTime;
} SGrantedInfo;
int32_t grantCheck(EGrantType grant); int32_t grantCheck(EGrantType grant);
int32_t grantAlterActiveCode(int32_t did, const char* old, const char* newer, char* out, int8_t type); int32_t grantAlterActiveCode(int32_t did, const char* old, const char* newer, char* out, int8_t type);

View File

@ -186,6 +186,7 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_MND_RESUME_STREAM, "resume-stream", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_RESUME_STREAM, "resume-stream", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_CANDIDITATE, "stream-checkpoint-remain", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_NODECHANGE_CHECK, "stream-nodechange-check", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_STREAM_NODECHANGE_CHECK, "stream-nodechange-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TRIM_DB_TIMER, "trim-db-tmr", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_TRIM_DB_TIMER, "trim-db-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GRANT_NOTIFY, "grant-notify", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_GRANT_NOTIFY, "grant-notify", NULL, NULL)

View File

@ -481,6 +481,7 @@ typedef struct SVnodeModifyOpStmt {
SHashObj* pSubTableHashObj; // SHashObj<table_name, STableMeta*> SHashObj* pSubTableHashObj; // SHashObj<table_name, STableMeta*>
SHashObj* pTableNameHashObj; // set of table names for refreshing meta, sync mode SHashObj* pTableNameHashObj; // set of table names for refreshing meta, sync mode
SHashObj* pDbFNameHashObj; // set of db names for refreshing meta, sync mode SHashObj* pDbFNameHashObj; // set of db names for refreshing meta, sync mode
SHashObj* pTableCxtHashObj; // temp SHashObj<tuid, STableDataCxt*> for single request
SArray* pVgDataBlocks; // SArray<SVgroupDataCxt*> SArray* pVgDataBlocks; // SArray<SVgroupDataCxt*>
SVCreateTbReq* pCreateTblReq; SVCreateTbReq* pCreateTblReq;
TdFilePtr fp; TdFilePtr fp;

View File

@ -35,6 +35,7 @@ int32_t streamStateBegin(SStreamState* pState);
int32_t streamStateCommit(SStreamState* pState); int32_t streamStateCommit(SStreamState* pState);
void streamStateDestroy(SStreamState* pState, bool remove); void streamStateDestroy(SStreamState* pState, bool remove);
int32_t streamStateDeleteCheckPoint(SStreamState* pState, TSKEY mark); int32_t streamStateDeleteCheckPoint(SStreamState* pState, TSKEY mark);
int32_t streamStateDelTaskDb(SStreamState* pState);
int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen); int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen); int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen);
@ -133,4 +134,4 @@ char* streamStateIntervalDump(SStreamState* pState);
} }
#endif #endif
#endif /* ifndef _STREAM_STATE_H_ */ #endif /* ifndef _STREAM_STATE_H_ */

View File

@ -58,7 +58,9 @@ typedef struct SStreamTask SStreamTask;
typedef struct SStreamQueue SStreamQueue; typedef struct SStreamQueue SStreamQueue;
typedef struct SStreamTaskSM SStreamTaskSM; typedef struct SStreamTaskSM SStreamTaskSM;
#define SSTREAM_TASK_VER 2 #define SSTREAM_TASK_VER 2
#define SSTREAM_TASK_INCOMPATIBLE_VER 1
#define SSTREAM_TASK_NEED_CONVERT_VER 2
enum { enum {
STREAM_STATUS__NORMAL = 0, STREAM_STATUS__NORMAL = 0,
@ -110,6 +112,7 @@ typedef enum {
TASK_LEVEL__SOURCE = 1, TASK_LEVEL__SOURCE = 1,
TASK_LEVEL__AGG, TASK_LEVEL__AGG,
TASK_LEVEL__SINK, TASK_LEVEL__SINK,
TASK_LEVEL_SMA,
} ETASK_LEVEL; } ETASK_LEVEL;
enum { enum {
@ -304,11 +307,16 @@ typedef struct SStreamTaskId {
typedef struct SCheckpointInfo { typedef struct SCheckpointInfo {
int64_t startTs; int64_t startTs;
int64_t checkpointId; int64_t checkpointId;
int64_t checkpointVer; // latest checkpointId version
int64_t processedVer; // already processed ver, that has generated results version. int64_t checkpointVer; // latest checkpointId version
int64_t processedVer;
int64_t nextProcessVer; // current offset in WAL, not serialize it int64_t nextProcessVer; // current offset in WAL, not serialize it
int64_t failedId; // record the latest failed checkpoint id int64_t failedId; // record the latest failed checkpoint id
int64_t checkpointingId;
int32_t downstreamAlignNum;
int32_t checkpointNotReadyTasks;
bool dispatchCheckpointTrigger; bool dispatchCheckpointTrigger;
int64_t msgVer;
} SCheckpointInfo; } SCheckpointInfo;
typedef struct SStreamStatus { typedef struct SStreamStatus {
@ -447,12 +455,11 @@ struct SStreamTask {
int64_t checkReqId; int64_t checkReqId;
SArray* checkReqIds; // shuffle SArray* checkReqIds; // shuffle
int32_t refCnt; int32_t refCnt;
int64_t checkpointingId;
int32_t checkpointAlignCnt;
int32_t checkpointNotReadyTasks;
int32_t transferStateAlignCnt; int32_t transferStateAlignCnt;
struct SStreamMeta* pMeta; struct SStreamMeta* pMeta;
SSHashObj* pNameMap; SSHashObj* pNameMap;
void* pBackend;
int64_t backendRefId;
char reserve[256]; char reserve[256];
}; };
@ -490,20 +497,25 @@ typedef struct SStreamMeta {
int32_t walScanCounter; int32_t walScanCounter;
void* streamBackend; void* streamBackend;
int64_t streamBackendRid; int64_t streamBackendRid;
SHashObj* pTaskBackendUnique; SHashObj* pTaskDbUnique;
TdThreadMutex backendMutex; TdThreadMutex backendMutex;
SMetaHbInfo* pHbInfo; SMetaHbInfo* pHbInfo;
STaskUpdateInfo updateInfo; STaskUpdateInfo updateInfo;
SHashObj* pUpdateTaskSet;
int32_t numOfStreamTasks; // this value should be increased when a new task is added into the meta int32_t numOfStreamTasks; // this value should be increased when a new task is added into the meta
int32_t numOfPausedTasks; int32_t numOfPausedTasks;
int32_t chkptNotReadyTasks;
int64_t rid; int64_t rid;
int64_t chkpId; int64_t chkpId;
int32_t chkpCap;
SArray* chkpSaved; SArray* chkpSaved;
SArray* chkpInUse; SArray* chkpInUse;
int32_t chkpCap;
SRWLatch chkpDirLock; SRWLatch chkpDirLock;
void* qHandle;
int32_t pauseTaskNum;
void* bkdChkptMgt;
} SStreamMeta; } SStreamMeta;
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo); int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
@ -659,7 +671,7 @@ int32_t tDecodeStreamCheckpointReadyMsg(SDecoder* pDecoder, SStreamCheckpointRea
typedef struct STaskStatusEntry { typedef struct STaskStatusEntry {
STaskId id; STaskId id;
int32_t status; int32_t status;
int32_t statusLastDuration; // to record the last duration of current status int32_t statusLastDuration; // to record the last duration of current status
int64_t stage; int64_t stage;
int32_t nodeId; int32_t nodeId;
int64_t verStart; // start version in WAL, only valid for source task int64_t verStart; // start version in WAL, only valid for source task
@ -668,10 +680,12 @@ typedef struct STaskStatusEntry {
int32_t relatedHTask; // has related fill-history task int32_t relatedHTask; // has related fill-history task
int64_t activeCheckpointId; // current active checkpoint id int64_t activeCheckpointId; // current active checkpoint id
bool checkpointFailed; // denote if the checkpoint is failed or not bool checkpointFailed; // denote if the checkpoint is failed or not
bool inputQChanging; // inputQ is changing or not
int64_t inputQUnchangeCounter;
double inputQUsed; // in MiB double inputQUsed; // in MiB
double inputRate; double inputRate;
double sinkQuota; // existed quota size for sink task double sinkQuota; // existed quota size for sink task
double sinkDataSize; // sink to dst data size double sinkDataSize; // sink to dst data size
} STaskStatusEntry; } STaskStatusEntry;
typedef struct SStreamHbMsg { typedef struct SStreamHbMsg {
@ -832,8 +846,10 @@ int32_t streamMetaReopen(SStreamMeta* pMeta);
void streamMetaInitBackend(SStreamMeta* pMeta); void streamMetaInitBackend(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta); int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta); int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
int32_t streamMetaReloadAllTasks(SStreamMeta* pMeta);
int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta); int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta);
void streamMetaNotifyClose(SStreamMeta* pMeta); void streamMetaNotifyClose(SStreamMeta* pMeta);
int32_t streamTaskSetDb(SStreamMeta* pMeta, void* pTask, char* key);
void streamMetaStartHb(SStreamMeta* pMeta); void streamMetaStartHb(SStreamMeta* pMeta);
bool streamMetaTaskInTimer(SStreamMeta* pMeta); bool streamMetaTaskInTimer(SStreamMeta* pMeta);
int32_t streamMetaUpdateTaskDownstreamStatus(SStreamTask* pTask, int64_t startTs, int64_t endTs, bool succ); int32_t streamMetaUpdateTaskDownstreamStatus(SStreamTask* pTask, int64_t startTs, int64_t endTs, bool succ);
@ -855,8 +871,10 @@ int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHa
int32_t buildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SRpcMsg* pMsg, int32_t buildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SRpcMsg* pMsg,
int8_t isSucceed); int8_t isSucceed);
SStreamTaskSM* streamCreateStateMachine(SStreamTask* pTask);
void* streamDestroyStateMachine(SStreamTaskSM* pSM);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
#endif /* ifndef _STREAM_H_ */ #endif /* ifndef _STREAM_H_ */

View File

@ -558,7 +558,6 @@ int32_t* taosGetErrno();
#define TSDB_CODE_GRANT_GEN_IVLD_KEY TAOS_DEF_ERROR_CODE(0, 0x0812) #define TSDB_CODE_GRANT_GEN_IVLD_KEY TAOS_DEF_ERROR_CODE(0, 0x0812)
#define TSDB_CODE_GRANT_GEN_APP_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0813) #define TSDB_CODE_GRANT_GEN_APP_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0813)
#define TSDB_CODE_GRANT_GEN_ENC_IVLD_KLEN TAOS_DEF_ERROR_CODE(0, 0x0814) #define TSDB_CODE_GRANT_GEN_ENC_IVLD_KLEN TAOS_DEF_ERROR_CODE(0, 0x0814)
#define TSDB_CODE_GRANT_PAR_IVLD_DIST TAOS_DEF_ERROR_CODE(0, 0x0815)
// sync // sync
// #define TSDB_CODE_SYN_INVALID_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0900) // 2.x // #define TSDB_CODE_SYN_INVALID_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0900) // 2.x

View File

@ -139,6 +139,8 @@ int32_t getWordLength(char type);
int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, char *const output, const char type); int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, char *const output, const char type);
int32_t tsDecompressFloatImplAvx512(const char *const input, const int32_t nelements, char *const output); int32_t tsDecompressFloatImplAvx512(const char *const input, const int32_t nelements, char *const output);
int32_t tsDecompressFloatImplAvx2(const char *const input, const int32_t nelements, char *const output); int32_t tsDecompressFloatImplAvx2(const char *const input, const int32_t nelements, char *const output);
int32_t tsDecompressTimestampAvx512(const char* const input, const int32_t nelements, char *const output, bool bigEndian);
int32_t tsDecompressTimestampAvx2(const char* const input, const int32_t nelements, char *const output, bool bigEndian);
/************************************************************************* /*************************************************************************
* STREAM COMPRESSION * STREAM COMPRESSION

View File

@ -38,3 +38,4 @@ source /etc/profile
${csudo}mkdir -p ${corePath} ||: ${csudo}mkdir -p ${corePath} ||:
${csudo}sysctl -w kernel.core_pattern=${corePath}/core-%e-%p ||: ${csudo}sysctl -w kernel.core_pattern=${corePath}/core-%e-%p ||:
${csudo}echo "${corePath}/core-%e-%p" | ${csudo}tee /proc/sys/kernel/core_pattern ||: ${csudo}echo "${corePath}/core-%e-%p" | ${csudo}tee /proc/sys/kernel/core_pattern ||:
${csudo}echo "kernel.core_pattern = ${corePath}/core_%e-%p" >> /etc/sysctl.conf ||:

View File

@ -126,9 +126,9 @@ void queryCallback(void* param, void* res, int32_t code) {
taos_fetch_raw_block_a(res, fetchCallback, param); taos_fetch_raw_block_a(res, fetchCallback, param);
} }
void createNewTable(TAOS* pConn, int32_t index) { void createNewTable(TAOS* pConn, int32_t index, int32_t numOfRows, int64_t startTs, const char* pVarchar) {
char str[1024] = {0}; char str[1024] = {0};
sprintf(str, "create table tu%d using st2 tags(%d)", index, index); sprintf(str, "create table if not exists tu%d using st2 tags(%d)", index, index);
TAOS_RES* pRes = taos_query(pConn, str); TAOS_RES* pRes = taos_query(pConn, str);
if (taos_errno(pRes) != 0) { if (taos_errno(pRes) != 0) {
@ -136,22 +136,43 @@ void createNewTable(TAOS* pConn, int32_t index) {
} }
taos_free_result(pRes); taos_free_result(pRes);
for (int32_t i = 0; i < 10000; i += 20) { if (startTs == 0) {
char sql[1024] = {0}; for (int32_t i = 0; i < numOfRows; i += 20) {
sprintf(sql, char sql[1024] = {0};
"insert into tu%d values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" sprintf(sql,
"(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" "insert into tu%d values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
"(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)" "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
"(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)", "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
index, i, i, i + 1, i + 1, i + 2, i + 2, i + 3, i + 3, i + 4, i + 4, i + 5, i + 5, i + 6, i + 6, i + 7, "(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)",
i + 7, i + 8, i + 8, i + 9, i + 9, i + 10, i + 10, i + 11, i + 11, i + 12, i + 12, i + 13, i + 13, i + 14, index, i, i, i + 1, i + 1, i + 2, i + 2, i + 3, i + 3, i + 4, i + 4, i + 5, i + 5, i + 6, i + 6, i + 7,
i + 14, i + 15, i + 15, i + 16, i + 16, i + 17, i + 17, i + 18, i + 18, i + 19, i + 19); i + 7, i + 8, i + 8, i + 9, i + 9, i + 10, i + 10, i + 11, i + 11, i + 12, i + 12, i + 13, i + 13, i + 14,
TAOS_RES* p = taos_query(pConn, sql); i + 14, i + 15, i + 15, i + 16, i + 16, i + 17, i + 17, i + 18, i + 18, i + 19, i + 19);
if (taos_errno(p) != 0) { TAOS_RES* p = taos_query(pConn, sql);
printf("failed to insert data, reason:%s\n", taos_errstr(p)); if (taos_errno(p) != 0) {
} printf("failed to insert data, reason:%s\n", taos_errstr(p));
}
taos_free_result(p); taos_free_result(p);
}
} else {
for (int32_t i = 0; i < numOfRows; i += 20) {
char sql[1024*50] = {0};
sprintf(sql,
"insert into tu%d values(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, "
"%d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, "
"'%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')(%ld, %d, '%s')",
index, startTs, i, pVarchar, startTs + 1, i + 1, pVarchar, startTs + 2, i + 2, pVarchar, startTs + 3, i + 3, pVarchar, startTs + 4, i + 4,
pVarchar, startTs + 5, i + 5, pVarchar, startTs + 6, i + 6, pVarchar, startTs + 7, i + 7, pVarchar, startTs + 8, i + 8, pVarchar, startTs + 9, i + 9,
pVarchar, startTs + 10, i + 10, pVarchar, startTs + 11, i + 11, pVarchar, startTs + 12, i + 12, pVarchar, startTs + 13, i + 13, pVarchar, startTs + 14,
i + 14, pVarchar, startTs + 15, i + 15, pVarchar, startTs + 16, i + 16, pVarchar, startTs + 17, i + 17, pVarchar, startTs + 18, i + 18,
pVarchar, startTs + 19, i + 19, pVarchar);
TAOS_RES* p = taos_query(pConn, sql);
if (taos_errno(p) != 0) {
printf("failed to insert data, reason:%s\n", taos_errstr(p));
}
taos_free_result(p);
}
} }
} }
@ -808,14 +829,7 @@ TEST(clientCase, projection_query_tables) {
TAOS_RES* pRes = taos_query(pConn, "use abc1"); TAOS_RES* pRes = taos_query(pConn, "use abc1");
taos_free_result(pRes); taos_free_result(pRes);
pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)"); pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int, f varchar(4096)) tags(a int)");
if (taos_errno(pRes) != 0) {
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create stable st2 (ts timestamp, k int) tags(a int)");
if (taos_errno(pRes) != 0) { if (taos_errno(pRes) != 0) {
printf("failed to create table tu, reason:%s\n", taos_errstr(pRes)); printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
} }
@ -828,28 +842,32 @@ TEST(clientCase, projection_query_tables) {
taos_free_result(pRes); taos_free_result(pRes);
int64_t start = 1685959190000; int64_t start = 1685959190000;
const char* pstr =
"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefgh"
"ijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnop"
"qrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwx"
"yzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdef"
"ghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz!@#$%^&&*&^^%$#@!qQWERTYUIOPASDFGHJKL:"
"QWERTYUIOP{}";
int32_t code = -1; for(int32_t i = 0; i < 10000; ++i) {
for(int32_t i = 0; i < 1000000; ++i) { char str[1024] = {0};
char t[512] = {0}; sprintf(str, "create table if not exists tu%d using st2 tags(%d)", i, i);
sprintf(t, "insert into t1 values(now, %d)", i); TAOS_RES* px = taos_query(pConn, str);
while(1) { if (taos_errno(px) != 0) {
void* p = taos_query(pConn, t); printf("failed to create table tu, reason:%s\n", taos_errstr(pRes));
code = taos_errno(p); }
taos_free_result(p); taos_free_result(px);
if (code != 0) { }
printf("insert data error, retry\n");
} else { for(int32_t j = 0; j < 5000; ++j) {
break; start += 20;
} for (int32_t i = 0; i < 10000; ++i) {
createNewTable(pConn, i, 20, start, pstr);
} }
} }
for (int32_t i = 0; i < 1; ++i) {
printf("create table :%d\n", i);
createNewTable(pConn, i);
}
// //
// pRes = taos_query(pConn, "select * from tu"); // pRes = taos_query(pConn, "select * from tu");
// if (taos_errno(pRes) != 0) { // if (taos_errno(pRes) != 0) {

View File

@ -86,7 +86,7 @@ typedef struct {
char err_msg[128]; char err_msg[128];
S3Status status; S3Status status;
uint64_t content_length; uint64_t content_length;
char *buf; char * buf;
int64_t buf_pos; int64_t buf_pos;
} TS3SizeCBD; } TS3SizeCBD;
@ -270,7 +270,7 @@ typedef struct list_parts_callback_data {
typedef struct MultipartPartData { typedef struct MultipartPartData {
put_object_callback_data put_object_data; put_object_callback_data put_object_data;
int seq; int seq;
UploadManager *manager; UploadManager * manager;
} MultipartPartData; } MultipartPartData;
static int putObjectDataCallback(int bufferSize, char *buffer, void *callbackData) { static int putObjectDataCallback(int bufferSize, char *buffer, void *callbackData) {
@ -317,7 +317,7 @@ S3Status MultipartResponseProperiesCallback(const S3ResponseProperties *properti
MultipartPartData *data = (MultipartPartData *)callbackData; MultipartPartData *data = (MultipartPartData *)callbackData;
int seq = data->seq; int seq = data->seq;
const char *etag = properties->eTag; const char * etag = properties->eTag;
data->manager->etags[seq - 1] = strdup(etag); data->manager->etags[seq - 1] = strdup(etag);
data->manager->next_etags_pos = seq; data->manager->next_etags_pos = seq;
return S3StatusOK; return S3StatusOK;
@ -450,10 +450,10 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
int32_t code = 0; int32_t code = 0;
const char *key = object; const char *key = object;
// const char *uploadId = 0; // const char *uploadId = 0;
const char *filename = 0; const char * filename = 0;
uint64_t contentLength = 0; uint64_t contentLength = 0;
const char *cacheControl = 0, *contentType = 0, *md5 = 0; const char * cacheControl = 0, *contentType = 0, *md5 = 0;
const char *contentDispositionFilename = 0, *contentEncoding = 0; const char * contentDispositionFilename = 0, *contentEncoding = 0;
int64_t expires = -1; int64_t expires = -1;
S3CannedAcl cannedAcl = S3CannedAclPrivate; S3CannedAcl cannedAcl = S3CannedAclPrivate;
int metaPropertiesCount = 0; int metaPropertiesCount = 0;
@ -467,6 +467,7 @@ int32_t s3PutObjectFromFile2(const char *file, const char *object) {
// data.infileFD = NULL; // data.infileFD = NULL;
// data.noStatus = noStatus; // data.noStatus = noStatus;
// uError("ERROR: %s stat file %s: ", __func__, file);
if (taosStatFile(file, &contentLength, NULL, NULL) < 0) { if (taosStatFile(file, &contentLength, NULL, NULL) < 0) {
uError("ERROR: %s Failed to stat file %s: ", __func__, file); uError("ERROR: %s Failed to stat file %s: ", __func__, file);
code = TAOS_SYSTEM_ERROR(errno); code = TAOS_SYSTEM_ERROR(errno);
@ -647,7 +648,7 @@ typedef struct list_bucket_callback_data {
char nextMarker[1024]; char nextMarker[1024];
int keyCount; int keyCount;
int allDetails; int allDetails;
SArray *objectArray; SArray * objectArray;
} list_bucket_callback_data; } list_bucket_callback_data;
static S3Status listBucketCallback(int isTruncated, const char *nextMarker, int contentsCount, static S3Status listBucketCallback(int isTruncated, const char *nextMarker, int contentsCount,
@ -692,11 +693,11 @@ static void s3FreeObjectKey(void *pItem) {
static SArray *getListByPrefix(const char *prefix) { static SArray *getListByPrefix(const char *prefix) {
S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret, S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret,
0, awsRegionG}; 0, awsRegionG};
S3ListBucketHandler listBucketHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback}, S3ListBucketHandler listBucketHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
&listBucketCallback}; &listBucketCallback};
const char *marker = 0, *delimiter = 0; const char * marker = 0, *delimiter = 0;
int maxkeys = 0, allDetails = 0; int maxkeys = 0, allDetails = 0;
list_bucket_callback_data data; list_bucket_callback_data data;
data.objectArray = taosArrayInit(32, sizeof(void *)); data.objectArray = taosArrayInit(32, sizeof(void *));
@ -737,7 +738,7 @@ static SArray *getListByPrefix(const char *prefix) {
void s3DeleteObjects(const char *object_name[], int nobject) { void s3DeleteObjects(const char *object_name[], int nobject) {
S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret, S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret,
0, awsRegionG}; 0, awsRegionG};
S3ResponseHandler responseHandler = {0, &responseCompleteCallback}; S3ResponseHandler responseHandler = {0, &responseCompleteCallback};
for (int i = 0; i < nobject; ++i) { for (int i = 0; i < nobject; ++i) {
@ -788,7 +789,7 @@ int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size,
const char *ifMatch = 0, *ifNotMatch = 0; const char *ifMatch = 0, *ifNotMatch = 0;
S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret, S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret,
0, awsRegionG}; 0, awsRegionG};
S3GetConditions getConditions = {ifModifiedSince, ifNotModifiedSince, ifMatch, ifNotMatch}; S3GetConditions getConditions = {ifModifiedSince, ifNotModifiedSince, ifMatch, ifNotMatch};
S3GetObjectHandler getObjectHandler = {{&responsePropertiesCallback, &responseCompleteCallback}, S3GetObjectHandler getObjectHandler = {{&responsePropertiesCallback, &responseCompleteCallback},
&getObjectDataCallback}; &getObjectDataCallback};
@ -826,7 +827,7 @@ int32_t s3GetObjectToFile(const char *object_name, char *fileName) {
const char *ifMatch = 0, *ifNotMatch = 0; const char *ifMatch = 0, *ifNotMatch = 0;
S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret, S3BucketContext bucketContext = {0, tsS3BucketName, protocolG, uriStyleG, tsS3AccessKeyId, tsS3AccessKeySecret,
0, awsRegionG}; 0, awsRegionG};
S3GetConditions getConditions = {ifModifiedSince, ifNotModifiedSince, ifMatch, ifNotMatch}; S3GetConditions getConditions = {ifModifiedSince, ifNotModifiedSince, ifMatch, ifNotMatch};
S3GetObjectHandler getObjectHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback}, S3GetObjectHandler getObjectHandler = {{&responsePropertiesCallbackNull, &responseCompleteCallback},
&getObjectCallback}; &getObjectCallback};
@ -857,7 +858,7 @@ int32_t s3GetObjectsByPrefix(const char *prefix, const char *path) {
if (objectArray == NULL) return -1; if (objectArray == NULL) return -1;
for (size_t i = 0; i < taosArrayGetSize(objectArray); i++) { for (size_t i = 0; i < taosArrayGetSize(objectArray); i++) {
char *object = taosArrayGetP(objectArray, i); char * object = taosArrayGetP(objectArray, i);
const char *tmp = strchr(object, '/'); const char *tmp = strchr(object, '/');
tmp = (tmp == NULL) ? object : tmp + 1; tmp = (tmp == NULL) ? object : tmp + 1;
char fileName[PATH_MAX] = {0}; char fileName[PATH_MAX] = {0};
@ -948,12 +949,12 @@ static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) {
int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) { int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) {
int32_t code = 0; int32_t code = 0;
cos_pool_t *p = NULL; cos_pool_t * p = NULL;
int is_cname = 0; int is_cname = 0;
cos_status_t *s = NULL; cos_status_t * s = NULL;
cos_request_options_t *options = NULL; cos_request_options_t *options = NULL;
cos_string_t bucket, object, file; cos_string_t bucket, object, file;
cos_table_t *resp_headers; cos_table_t * resp_headers;
// int traffic_limit = 0; // int traffic_limit = 0;
cos_pool_create(&p, NULL); cos_pool_create(&p, NULL);
@ -984,14 +985,14 @@ int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) {
int32_t s3PutObjectFromFile2(const char *file_str, const char *object_str) { int32_t s3PutObjectFromFile2(const char *file_str, const char *object_str) {
int32_t code = 0; int32_t code = 0;
cos_pool_t *p = NULL; cos_pool_t * p = NULL;
int is_cname = 0; int is_cname = 0;
cos_status_t *s = NULL; cos_status_t * s = NULL;
cos_request_options_t *options = NULL; cos_request_options_t * options = NULL;
cos_string_t bucket, object, file; cos_string_t bucket, object, file;
cos_table_t *resp_headers; cos_table_t * resp_headers;
int traffic_limit = 0; int traffic_limit = 0;
cos_table_t *headers = NULL; cos_table_t * headers = NULL;
cos_resumable_clt_params_t *clt_params = NULL; cos_resumable_clt_params_t *clt_params = NULL;
cos_pool_create(&p, NULL); cos_pool_create(&p, NULL);
@ -1024,11 +1025,11 @@ int32_t s3PutObjectFromFile2(const char *file_str, const char *object_str) {
} }
void s3DeleteObjectsByPrefix(const char *prefix_str) { void s3DeleteObjectsByPrefix(const char *prefix_str) {
cos_pool_t *p = NULL; cos_pool_t * p = NULL;
cos_request_options_t *options = NULL; cos_request_options_t *options = NULL;
int is_cname = 0; int is_cname = 0;
cos_string_t bucket; cos_string_t bucket;
cos_status_t *s = NULL; cos_status_t * s = NULL;
cos_string_t prefix; cos_string_t prefix;
cos_pool_create(&p, NULL); cos_pool_create(&p, NULL);
@ -1043,10 +1044,10 @@ void s3DeleteObjectsByPrefix(const char *prefix_str) {
} }
void s3DeleteObjects(const char *object_name[], int nobject) { void s3DeleteObjects(const char *object_name[], int nobject) {
cos_pool_t *p = NULL; cos_pool_t * p = NULL;
int is_cname = 0; int is_cname = 0;
cos_string_t bucket; cos_string_t bucket;
cos_table_t *resp_headers = NULL; cos_table_t * resp_headers = NULL;
cos_request_options_t *options = NULL; cos_request_options_t *options = NULL;
cos_list_t object_list; cos_list_t object_list;
cos_list_t deleted_object_list; cos_list_t deleted_object_list;
@ -1080,14 +1081,14 @@ void s3DeleteObjects(const char *object_name[], int nobject) {
bool s3Exists(const char *object_name) { bool s3Exists(const char *object_name) {
bool ret = false; bool ret = false;
cos_pool_t *p = NULL; cos_pool_t * p = NULL;
int is_cname = 0; int is_cname = 0;
cos_status_t *s = NULL; cos_status_t * s = NULL;
cos_request_options_t *options = NULL; cos_request_options_t * options = NULL;
cos_string_t bucket; cos_string_t bucket;
cos_string_t object; cos_string_t object;
cos_table_t *resp_headers; cos_table_t * resp_headers;
cos_table_t *headers = NULL; cos_table_t * headers = NULL;
cos_object_exist_status_e object_exist; cos_object_exist_status_e object_exist;
cos_pool_create(&p, NULL); cos_pool_create(&p, NULL);
@ -1114,15 +1115,15 @@ bool s3Exists(const char *object_name) {
bool s3Get(const char *object_name, const char *path) { bool s3Get(const char *object_name, const char *path) {
bool ret = false; bool ret = false;
cos_pool_t *p = NULL; cos_pool_t * p = NULL;
int is_cname = 0; int is_cname = 0;
cos_status_t *s = NULL; cos_status_t * s = NULL;
cos_request_options_t *options = NULL; cos_request_options_t *options = NULL;
cos_string_t bucket; cos_string_t bucket;
cos_string_t object; cos_string_t object;
cos_string_t file; cos_string_t file;
cos_table_t *resp_headers = NULL; cos_table_t * resp_headers = NULL;
cos_table_t *headers = NULL; cos_table_t * headers = NULL;
int traffic_limit = 0; int traffic_limit = 0;
// create a memory pool // create a memory pool
@ -1158,15 +1159,15 @@ bool s3Get(const char *object_name, const char *path) {
int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t block_size, bool check, uint8_t **ppBlock) { int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t block_size, bool check, uint8_t **ppBlock) {
(void)check; (void)check;
int32_t code = 0; int32_t code = 0;
cos_pool_t *p = NULL; cos_pool_t * p = NULL;
int is_cname = 0; int is_cname = 0;
cos_status_t *s = NULL; cos_status_t * s = NULL;
cos_request_options_t *options = NULL; cos_request_options_t *options = NULL;
cos_string_t bucket; cos_string_t bucket;
cos_string_t object; cos_string_t object;
cos_table_t *resp_headers; cos_table_t * resp_headers;
cos_table_t *headers = NULL; cos_table_t * headers = NULL;
cos_buf_t *content = NULL; cos_buf_t * content = NULL;
// cos_string_t file; // cos_string_t file;
// int traffic_limit = 0; // int traffic_limit = 0;
char range_buf[64]; char range_buf[64];
@ -1260,7 +1261,7 @@ void s3EvictCache(const char *path, long object_size) {
terrno = TAOS_SYSTEM_ERROR(errno); terrno = TAOS_SYSTEM_ERROR(errno);
vError("failed to open %s since %s", dir_name, terrstr()); vError("failed to open %s since %s", dir_name, terrstr());
} }
SArray *evict_files = taosArrayInit(16, sizeof(SEvictFile)); SArray * evict_files = taosArrayInit(16, sizeof(SEvictFile));
tdbDirEntryPtr pDirEntry; tdbDirEntryPtr pDirEntry;
while ((pDirEntry = taosReadDir(pDir)) != NULL) { while ((pDirEntry = taosReadDir(pDir)) != NULL) {
char *name = taosGetDirEntryName(pDirEntry); char *name = taosGetDirEntryName(pDirEntry);
@ -1302,13 +1303,13 @@ void s3EvictCache(const char *path, long object_size) {
long s3Size(const char *object_name) { long s3Size(const char *object_name) {
long size = 0; long size = 0;
cos_pool_t *p = NULL; cos_pool_t * p = NULL;
int is_cname = 0; int is_cname = 0;
cos_status_t *s = NULL; cos_status_t * s = NULL;
cos_request_options_t *options = NULL; cos_request_options_t *options = NULL;
cos_string_t bucket; cos_string_t bucket;
cos_string_t object; cos_string_t object;
cos_table_t *resp_headers = NULL; cos_table_t * resp_headers = NULL;
// create a memory pool // create a memory pool
cos_pool_create(&p, NULL); cos_pool_create(&p, NULL);
@ -1354,5 +1355,6 @@ int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size,
void s3EvictCache(const char *path, long object_size) {} void s3EvictCache(const char *path, long object_size) {}
long s3Size(const char *object_name) { return 0; } long s3Size(const char *object_name) { return 0; }
int32_t s3GetObjectsByPrefix(const char *prefix, const char *path) { return 0; } int32_t s3GetObjectsByPrefix(const char *prefix, const char *path) { return 0; }
int32_t s3GetObjectToFile(const char *object_name, char *fileName) { return 0; }
#endif #endif

View File

@ -107,7 +107,7 @@ bool tsEnableTelem = true;
int32_t tsTelemInterval = 43200; int32_t tsTelemInterval = 43200;
char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.tdengine.com"; char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.tdengine.com";
uint16_t tsTelemPort = 80; uint16_t tsTelemPort = 80;
char *tsTelemUri = "/report"; char * tsTelemUri = "/report";
#ifdef TD_ENTERPRISE #ifdef TD_ENTERPRISE
bool tsEnableCrashReport = false; bool tsEnableCrashReport = false;
@ -354,16 +354,24 @@ static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *input
char cfgFile[PATH_MAX + 100] = {0}; char cfgFile[PATH_MAX + 100] = {0};
taosExpandDir(inputCfgDir, cfgDir, PATH_MAX); taosExpandDir(inputCfgDir, cfgDir, PATH_MAX);
char lastC = cfgDir[strlen(cfgDir) - 1]; char lastC = cfgDir[strlen(cfgDir) - 1];
char *tdDirsep = TD_DIRSEP; char *tdDirsep = TD_DIRSEP;
if (lastC == '\\' || lastC == '/') { if (lastC == '\\' || lastC == '/') {
tdDirsep = ""; tdDirsep = "";
} }
if (taosIsDir(cfgDir)) { if (taosIsDir(cfgDir)) {
#ifdef CUS_PROMPT #ifdef CUS_PROMPT
snprintf(cfgFile, sizeof(cfgFile), "%s" "%s" "%s.cfg", cfgDir, tdDirsep, CUS_PROMPT); snprintf(cfgFile, sizeof(cfgFile),
"%s"
"%s"
"%s.cfg",
cfgDir, tdDirsep, CUS_PROMPT);
#else #else
snprintf(cfgFile, sizeof(cfgFile), "%s" "%s" "taos.cfg", cfgDir, tdDirsep); snprintf(cfgFile, sizeof(cfgFile),
"%s"
"%s"
"taos.cfg",
cfgDir, tdDirsep);
#endif #endif
} else { } else {
tstrncpy(cfgFile, cfgDir, sizeof(cfgDir)); tstrncpy(cfgFile, cfgDir, sizeof(cfgDir));
@ -729,6 +737,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddBool(pCfg, "disableStream", tsDisableStream, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1; if (cfgAddBool(pCfg, "disableStream", tsDisableStream, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER) != 0) return -1;
if (cfgAddInt64(pCfg, "streamBufferSize", tsStreamBufferSize, 0, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0) if (cfgAddInt64(pCfg, "streamBufferSize", tsStreamBufferSize, 0, INT64_MAX, CFG_SCOPE_SERVER, CFG_DYN_NONE) != 0)
return -1; return -1;
if (cfgAddInt32(pCfg, "checkpointInterval", tsStreamCheckpointInterval, 60, 1200, CFG_SCOPE_SERVER, if (cfgAddInt32(pCfg, "checkpointInterval", tsStreamCheckpointInterval, 60, 1200, CFG_SCOPE_SERVER,
CFG_DYN_ENT_SERVER) != 0) CFG_DYN_ENT_SERVER) != 0)
return -1; return -1;
@ -1347,7 +1356,7 @@ void taosCleanupCfg() {
typedef struct { typedef struct {
const char *optionName; const char *optionName;
void *optionVar; void * optionVar;
} OptionNameAndVar; } OptionNameAndVar;
static int32_t taosCfgSetOption(OptionNameAndVar *pOptions, int32_t optionSize, SConfigItem *pItem, bool isDebugflag) { static int32_t taosCfgSetOption(OptionNameAndVar *pOptions, int32_t optionSize, SConfigItem *pItem, bool isDebugflag) {
@ -1360,7 +1369,7 @@ static int32_t taosCfgSetOption(OptionNameAndVar *pOptions, int32_t optionSize,
switch (pItem->dtype) { switch (pItem->dtype) {
case CFG_DTYPE_BOOL: { case CFG_DTYPE_BOOL: {
int32_t flag = pItem->i32; int32_t flag = pItem->i32;
bool *pVar = pOptions[d].optionVar; bool * pVar = pOptions[d].optionVar;
uInfo("%s set from %d to %d", optName, *pVar, flag); uInfo("%s set from %d to %d", optName, *pVar, flag);
*pVar = flag; *pVar = flag;
terrno = TSDB_CODE_SUCCESS; terrno = TSDB_CODE_SUCCESS;

View File

@ -27,8 +27,6 @@ void mndCleanupCluster(SMnode *pMnode);
int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len); int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len);
int64_t mndGetClusterId(SMnode *pMnode); int64_t mndGetClusterId(SMnode *pMnode);
int64_t mndGetClusterCreateTime(SMnode *pMnode); int64_t mndGetClusterCreateTime(SMnode *pMnode);
int32_t mndGetClusterGrantedInfo(SMnode *pMnode, SGrantedInfo *pInfo);
int32_t mndSetClusterGrantedInfo(SMnode *pMnode, SGrantedInfo *pInfo);
int64_t mndGetClusterUpTime(SMnode *pMnode); int64_t mndGetClusterUpTime(SMnode *pMnode);
#ifdef __cplusplus #ifdef __cplusplus

View File

@ -192,8 +192,6 @@ typedef struct {
int64_t createdTime; int64_t createdTime;
int64_t updateTime; int64_t updateTime;
int32_t upTime; int32_t upTime;
int64_t grantedTime;
int64_t connGrantedTime;
} SClusterObj; } SClusterObj;
typedef struct { typedef struct {
@ -700,6 +698,11 @@ typedef struct {
} SStreamObj; } SStreamObj;
typedef struct SStreamSeq {
char name[24];
uint64_t seq;
SRWLatch lock;
} SStreamSeq;
int32_t tEncodeSStreamObj(SEncoder* pEncoder, const SStreamObj* pObj); int32_t tEncodeSStreamObj(SEncoder* pEncoder, const SStreamObj* pObj);
int32_t tDecodeSStreamObj(SDecoder* pDecoder, SStreamObj* pObj, int32_t sver); int32_t tDecodeSStreamObj(SDecoder* pDecoder, SStreamObj* pObj, int32_t sver);
void tFreeStreamObj(SStreamObj* pObj); void tFreeStreamObj(SStreamObj* pObj);
@ -731,14 +734,13 @@ typedef struct {
int8_t type; int8_t type;
int32_t numOfCols; int32_t numOfCols;
SSchema* pSchema; SSchema* pSchema;
SRWLatch lock; SRWLatch lock;
} SViewObj; } SViewObj;
int32_t tEncodeSViewObj(SEncoder* pEncoder, const SViewObj* pObj); int32_t tEncodeSViewObj(SEncoder* pEncoder, const SViewObj* pObj);
int32_t tDecodeSViewObj(SDecoder* pDecoder, SViewObj* pObj, int32_t sver); int32_t tDecodeSViewObj(SDecoder* pDecoder, SViewObj* pObj, int32_t sver);
void tFreeSViewObj(SViewObj* pObj); void tFreeSViewObj(SViewObj* pObj);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@ -28,18 +28,28 @@ typedef struct SStreamTransInfo {
const char *name; const char *name;
} SStreamTransInfo; } SStreamTransInfo;
// time to generate the checkpoint; if now() - checkpointTs >= tsCheckpointInterval, this checkpoint will be discarded
// to avoid too many checkpoints for a task in the waiting list
typedef struct SCheckpointCandEntry {
char * pName;
int64_t streamId;
int64_t checkpointTs;
int64_t checkpointId;
} SCheckpointCandEntry;
typedef struct SStreamTransMgmt { typedef struct SStreamTransMgmt {
SHashObj *pDBTrans; SHashObj *pDBTrans;
SHashObj *pWaitingList; // list of stream ids whose timed checkpoint failed to be issued due to a trans conflict.
} SStreamTransMgmt; } SStreamTransMgmt;
typedef struct SStreamExecInfo { typedef struct SStreamExecInfo {
SArray *pNodeList; SArray * pNodeList;
int64_t ts; // snapshot ts int64_t ts; // snapshot ts
SStreamTransMgmt transMgmt; SStreamTransMgmt transMgmt;
int64_t activeCheckpoint; // active check point id int64_t activeCheckpoint; // active check point id
SHashObj * pTaskMap; SHashObj * pTaskMap;
SArray * pTaskList; SArray * pTaskList;
TdThreadMutex lock; TdThreadMutex lock;
} SStreamExecInfo; } SStreamExecInfo;
extern SStreamExecInfo execInfo; extern SStreamExecInfo execInfo;
@ -51,8 +61,9 @@ void mndReleaseStream(SMnode *pMnode, SStreamObj *pStream);
int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb); int32_t mndDropStreamByDb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb);
int32_t mndPersistStream(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream); int32_t mndPersistStream(SMnode *pMnode, STrans *pTrans, SStreamObj *pStream);
int32_t mndStreamRegisterTrans(STrans* pTrans, const char* pName, const char* pSrcDb, const char* pDstDb); int32_t mndStreamRegisterTrans(STrans *pTrans, const char *pName, const char *pSrcDb, const char *pDstDb);
bool streamTransConflictOtherTrans(SMnode *pMnode, const char *pSrcDb, const char *pDstDb); int32_t mndAddtoCheckpointWaitingList(SStreamObj *pStream, int64_t checkpointId);
bool streamTransConflictOtherTrans(SMnode *pMnode, const char *pSrcDb, const char *pDstDb, bool lock);
// for sma // for sma
// TODO refactor // TODO refactor
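
The SCheckpointCandEntry comment above states the retention rule for queued checkpoints: a candidate is dropped once now() - checkpointTs >= tsCheckpointInterval. A self-contained sketch of that expiry check; CandEntry, candExpired, and tsCheckpointIntervalMs are simplified stand-ins, not the real mnode structures or config:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for SCheckpointCandEntry. */
typedef struct {
  int64_t streamId;
  int64_t checkpointTs;  /* when the checkpoint was generated, in ms */
  int64_t checkpointId;
} CandEntry;

/* Assumed helper mirroring the "now() - checkpointTs >= tsCheckpointInterval" rule above. */
static int candExpired(const CandEntry *pEntry, int64_t nowMs, int64_t tsCheckpointIntervalMs) {
  return (nowMs - pEntry->checkpointTs) >= tsCheckpointIntervalMs;
}

int main(void) {
  CandEntry e = {.streamId = 1, .checkpointTs = 1000, .checkpointId = 7};
  printf("expired=%d\n", candExpired(&e, 1000 + 60 * 1000, 60 * 1000)); /* 1: exactly at the limit */
  printf("expired=%d\n", candExpired(&e, 1000 + 30 * 1000, 60 * 1000)); /* 0: still eligible */
  return 0;
}
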

View File

@ -19,7 +19,7 @@
#include "mndTrans.h" #include "mndTrans.h"
#define CLUSTER_VER_NUMBE 1 #define CLUSTER_VER_NUMBE 1
#define CLUSTER_RESERVE_SIZE 44 #define CLUSTER_RESERVE_SIZE 60
int64_t tsExpireTime = 0; int64_t tsExpireTime = 0;
static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster); static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster);
@ -112,19 +112,6 @@ int64_t mndGetClusterCreateTime(SMnode *pMnode) {
return createTime; return createTime;
} }
int32_t mndGetClusterGrantedInfo(SMnode *pMnode, SGrantedInfo *pInfo) {
void *pIter = NULL;
SClusterObj *pCluster = mndAcquireCluster(pMnode, &pIter);
if (pCluster != NULL) {
pInfo->grantedTime = pCluster->grantedTime;
pInfo->connGrantedTime = pCluster->connGrantedTime;
mndReleaseCluster(pMnode, pCluster, pIter);
return 0;
}
return -1;
}
static int32_t mndGetClusterUpTimeImp(SClusterObj *pCluster) { static int32_t mndGetClusterUpTimeImp(SClusterObj *pCluster) {
#if 0 #if 0
int32_t upTime = taosGetTimestampSec() - pCluster->updateTime / 1000; int32_t upTime = taosGetTimestampSec() - pCluster->updateTime / 1000;
@ -159,8 +146,6 @@ static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster) {
SDB_SET_INT64(pRaw, dataPos, pCluster->updateTime, _OVER) SDB_SET_INT64(pRaw, dataPos, pCluster->updateTime, _OVER)
SDB_SET_BINARY(pRaw, dataPos, pCluster->name, TSDB_CLUSTER_ID_LEN, _OVER) SDB_SET_BINARY(pRaw, dataPos, pCluster->name, TSDB_CLUSTER_ID_LEN, _OVER)
SDB_SET_INT32(pRaw, dataPos, pCluster->upTime, _OVER) SDB_SET_INT32(pRaw, dataPos, pCluster->upTime, _OVER)
SDB_SET_INT64(pRaw, dataPos, pCluster->grantedTime, _OVER)
SDB_SET_INT64(pRaw, dataPos, pCluster->connGrantedTime, _OVER)
SDB_SET_RESERVE(pRaw, dataPos, CLUSTER_RESERVE_SIZE, _OVER) SDB_SET_RESERVE(pRaw, dataPos, CLUSTER_RESERVE_SIZE, _OVER)
terrno = 0; terrno = 0;
@ -201,8 +186,6 @@ static SSdbRow *mndClusterActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT64(pRaw, dataPos, &pCluster->updateTime, _OVER) SDB_GET_INT64(pRaw, dataPos, &pCluster->updateTime, _OVER)
SDB_GET_BINARY(pRaw, dataPos, pCluster->name, TSDB_CLUSTER_ID_LEN, _OVER) SDB_GET_BINARY(pRaw, dataPos, pCluster->name, TSDB_CLUSTER_ID_LEN, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pCluster->upTime, _OVER) SDB_GET_INT32(pRaw, dataPos, &pCluster->upTime, _OVER)
SDB_GET_INT64(pRaw, dataPos, &pCluster->grantedTime, _OVER);
SDB_GET_INT64(pRaw, dataPos, &pCluster->connGrantedTime, _OVER);
SDB_GET_RESERVE(pRaw, dataPos, CLUSTER_RESERVE_SIZE, _OVER) SDB_GET_RESERVE(pRaw, dataPos, CLUSTER_RESERVE_SIZE, _OVER)
terrno = 0; terrno = 0;
@ -235,8 +218,6 @@ static int32_t mndClusterActionUpdate(SSdb *pSdb, SClusterObj *pOld, SClusterObj
mTrace("cluster:%" PRId64 ", perform update action, old row:%p new row:%p, uptime from %d to %d", pOld->id, pOld, mTrace("cluster:%" PRId64 ", perform update action, old row:%p new row:%p, uptime from %d to %d", pOld->id, pOld,
pNew, pOld->upTime, pNew->upTime); pNew, pOld->upTime, pNew->upTime);
pOld->upTime = pNew->upTime; pOld->upTime = pNew->upTime;
pOld->grantedTime = pNew->grantedTime;
pOld->connGrantedTime = pNew->connGrantedTime;
pOld->updateTime = taosGetTimestampMs(); pOld->updateTime = taosGetTimestampMs();
return 0; return 0;
} }
@ -378,44 +359,3 @@ static int32_t mndProcessUptimeTimer(SRpcMsg *pReq) {
mndTransDrop(pTrans); mndTransDrop(pTrans);
return 0; return 0;
} }
int32_t mndSetClusterGrantedInfo(SMnode *pMnode, SGrantedInfo *pInfo) {
SClusterObj clusterObj = {0};
void *pIter = NULL;
SClusterObj *pCluster = mndAcquireCluster(pMnode, &pIter);
if (pCluster != NULL) {
if (pCluster->grantedTime >= pInfo->grantedTime && pCluster->connGrantedTime >= pInfo->connGrantedTime) {
mndReleaseCluster(pMnode, pCluster, pIter);
return 0;
}
memcpy(&clusterObj, pCluster, sizeof(SClusterObj));
if (pCluster->grantedTime < pInfo->grantedTime) clusterObj.grantedTime = pInfo->grantedTime;
if (pCluster->connGrantedTime < pInfo->connGrantedTime) clusterObj.connGrantedTime = pInfo->connGrantedTime;
mndReleaseCluster(pMnode, pCluster, pIter);
}
if (clusterObj.id <= 0) {
mError("can't get cluster info while update granted info");
return -1;
}
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, NULL, "granted-info");
if (pTrans == NULL) return -1;
SSdbRaw *pCommitRaw = mndClusterActionEncode(&clusterObj);
if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
mndTransDrop(pTrans);
return -1;
}
(void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
if (mndTransPrepare(pMnode, pTrans) != 0) {
mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
mndTransDrop(pTrans);
return -1;
}
mndTransDrop(pTrans);
return 0;
}
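
One way to read the CLUSTER_RESERVE_SIZE change above: dropping grantedTime and connGrantedTime removes two int64 fields (2 * 8 = 16 bytes) from the encoded row, and the reserve grows from 44 to 60 bytes so the raw record keeps the same total size across versions. A small arithmetic check of that reading; the bookkeeping below is an assumption, not the real sdb encoder:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int oldReserve    = 44;
  int droppedFields = 2;                                      /* grantedTime, connGrantedTime */
  int droppedBytes  = droppedFields * (int)sizeof(int64_t);   /* 16 */
  printf("new reserve = %d\n", oldReserve + droppedBytes);    /* 60, matching CLUSTER_RESERVE_SIZE */
  return 0;
}
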

View File

@ -790,9 +790,7 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg
if (cfgAll) { // alter all dnodes: if (cfgAll) { // alter all dnodes:
if (!failRecord) failRecord = taosArrayInit(1, sizeof(int32_t)); if (!failRecord) failRecord = taosArrayInit(1, sizeof(int32_t));
if (failRecord) taosArrayPush(failRecord, &pDnode->id); if (failRecord) taosArrayPush(failRecord, &pDnode->id);
if (0 == cfgAllErr || cfgAllErr == TSDB_CODE_GRANT_PAR_IVLD_ACTIVE) { if (0 == cfgAllErr) cfgAllErr = terrno; // output 1st terrno.
cfgAllErr = terrno; // output 1st or more specific error
}
} }
} else { } else {
terrno = 0; // no action for dup active code terrno = 0; // no action for dup active code
@ -808,9 +806,7 @@ static int32_t mndConfigDnode(SMnode *pMnode, SRpcMsg *pReq, SMCfgDnodeReq *pCfg
if (cfgAll) { if (cfgAll) {
if (!failRecord) failRecord = taosArrayInit(1, sizeof(int32_t)); if (!failRecord) failRecord = taosArrayInit(1, sizeof(int32_t));
if (failRecord) taosArrayPush(failRecord, &pDnode->id); if (failRecord) taosArrayPush(failRecord, &pDnode->id);
if (0 == cfgAllErr || cfgAllErr == TSDB_CODE_GRANT_PAR_IVLD_ACTIVE) { if (0 == cfgAllErr) cfgAllErr = terrno;
cfgAllErr = terrno; // output 1st or more specific error
}
} }
} else { } else {
terrno = 0; terrno = 0;
@ -1287,12 +1283,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
strcpy(dcfgReq.config, "supportvnodes"); strcpy(dcfgReq.config, "supportvnodes");
snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag); snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%d", flag);
} else if (strncasecmp(cfgReq.config, GRANT_ACTIVE_CODE, 10) == 0 || } else if (strncasecmp(cfgReq.config, "activeCode", 10) == 0 || strncasecmp(cfgReq.config, "cActiveCode", 11) == 0) {
strncasecmp(cfgReq.config, GRANT_C_ACTIVE_CODE, 11) == 0) {
if (cfgReq.dnodeId != -1) {
terrno = TSDB_CODE_INVALID_CFG;
goto _err_out;
}
int8_t opt = strncasecmp(cfgReq.config, "a", 1) == 0 ? DND_ACTIVE_CODE : DND_CONN_ACTIVE_CODE; int8_t opt = strncasecmp(cfgReq.config, "a", 1) == 0 ? DND_ACTIVE_CODE : DND_CONN_ACTIVE_CODE;
int8_t index = opt == DND_ACTIVE_CODE ? 10 : 11; int8_t index = opt == DND_ACTIVE_CODE ? 10 : 11;
if (' ' != cfgReq.config[index] && 0 != cfgReq.config[index]) { if (' ' != cfgReq.config[index] && 0 != cfgReq.config[index]) {
@ -1310,11 +1301,12 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
goto _err_out; goto _err_out;
} }
strcpy(dcfgReq.config, opt == DND_ACTIVE_CODE ? GRANT_ACTIVE_CODE : GRANT_C_ACTIVE_CODE); strcpy(dcfgReq.config, opt == DND_ACTIVE_CODE ? "activeCode" : "cActiveCode");
snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%s", cfgReq.value); snprintf(dcfgReq.value, TSDB_DNODE_VALUE_LEN, "%s", cfgReq.value);
if ((terrno = mndConfigDnode(pMnode, pReq, &cfgReq, opt)) != 0) { if (mndConfigDnode(pMnode, pReq, &cfgReq, opt) != 0) {
mError("dnode:%d, failed to config activeCode since %s", cfgReq.dnodeId, terrstr()); mError("dnode:%d, failed to config activeCode since %s", cfgReq.dnodeId, terrstr());
terrno = TSDB_CODE_INVALID_CFG;
goto _err_out; goto _err_out;
} }
tFreeSMCfgDnodeReq(&cfgReq); tFreeSMCfgDnodeReq(&cfgReq);
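
The activeCode branch above matches the option by a case-insensitive prefix ("activeCode" is 10 characters, "cActiveCode" is 11) and then requires the character right after the prefix to be a space or the terminating NUL, so a name like "activeCodeX" is rejected. A standalone sketch of that boundary test; matchOption and the sample strings are hypothetical:

#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Assumed helper that mimics the prefix + boundary check used above. */
static int matchOption(const char *cfg, const char *name) {
  size_t len = strlen(name);
  if (strncasecmp(cfg, name, len) != 0) return 0;
  return cfg[len] == ' ' || cfg[len] == '\0';  /* next char must end the token */
}

int main(void) {
  printf("%d\n", matchOption("activeCode abc", "activeCode"));  /* 1 */
  printf("%d\n", matchOption("cactivecode",    "cActiveCode")); /* 1: case-insensitive, NUL boundary */
  printf("%d\n", matchOption("activeCodeX",    "activeCode"));  /* 0: boundary char is 'X' */
  return 0;
}
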

View File

@ -146,6 +146,15 @@ static void mndStreamCheckpointTick(SMnode *pMnode, int64_t sec) {
} }
} }
static void mndStreamCheckpointRemain(SMnode* pMnode) {
int32_t contLen = 0;
void *pReq = mndBuildCheckpointTickMsg(&contLen, 0);
if (pReq != NULL) {
SRpcMsg rpcMsg = {.msgType = TDMT_MND_STREAM_CHECKPOINT_CANDIDITATE, .pCont = pReq, .contLen = contLen};
tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg);
}
}
static void mndStreamCheckNode(SMnode* pMnode) { static void mndStreamCheckNode(SMnode* pMnode) {
int32_t contLen = 0; int32_t contLen = 0;
void *pReq = mndBuildTimerMsg(&contLen); void *pReq = mndBuildTimerMsg(&contLen);
@ -286,6 +295,10 @@ static void *mndThreadFp(void *param) {
mndStreamCheckpointTick(pMnode, sec); mndStreamCheckpointTick(pMnode, sec);
} }
if (sec % 5 == 0) {
mndStreamCheckpointRemain(pMnode);
}
if (sec % tsStreamNodeCheckInterval == 0) { if (sec % tsStreamNodeCheckInterval == 0) {
mndStreamCheckNode(pMnode); mndStreamCheckNode(pMnode);
} }
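
mndStreamCheckpointRemain above posts a TDMT_MND_STREAM_CHECKPOINT_CANDIDITATE message, and the timer thread now calls it every 5 seconds alongside the existing checkpoint tick. A minimal sketch of that modulo-based dispatch, single-threaded and with a fake tick counter instead of the real timer; the interval value is an assumption standing in for the server config:

#include <stdio.h>

/* Stand-ins for the real timer callbacks; they only record that they fired. */
static void checkpointTick(long sec)   { printf("sec=%ld checkpoint tick\n", sec); }
static void checkpointRemain(long sec) { printf("sec=%ld re-check waiting candidates\n", sec); }

int main(void) {
  long tsStreamCheckpointInterval = 10;  /* assumed value for the sketch */
  for (long sec = 1; sec <= 20; ++sec) {
    if (sec % tsStreamCheckpointInterval == 0) checkpointTick(sec);
    if (sec % 5 == 0) checkpointRemain(sec);   /* mirrors the new 5-second branch above */
  }
  return 0;
}
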

File diff suppressed because it is too large

View File

@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include "mndTrans.h"
#include "mndStream.h" #include "mndStream.h"
#include "mndTrans.h"
typedef struct SKeyInfo { typedef struct SKeyInfo {
void* pKey; void* pKey;
@ -35,17 +35,15 @@ int32_t mndStreamRegisterTrans(STrans* pTrans, const char* pName, const char* pS
} }
int32_t clearFinishedTrans(SMnode* pMnode) { int32_t clearFinishedTrans(SMnode* pMnode) {
SArray* pList = taosArrayInit(4, sizeof(SKeyInfo));
size_t keyLen = 0; size_t keyLen = 0;
SArray* pList = taosArrayInit(4, sizeof(SKeyInfo));
void* pIter = NULL;
taosThreadMutexLock(&execInfo.lock);
void* pIter = NULL;
while ((pIter = taosHashIterate(execInfo.transMgmt.pDBTrans, pIter)) != NULL) { while ((pIter = taosHashIterate(execInfo.transMgmt.pDBTrans, pIter)) != NULL) {
SStreamTransInfo *pEntry = (SStreamTransInfo *)pIter; SStreamTransInfo* pEntry = (SStreamTransInfo*)pIter;
STrans* pTrans = mndAcquireTrans(pMnode, pEntry->transId);
// let's clear the finished trans // let's clear the finished trans
STrans* pTrans = mndAcquireTrans(pMnode, pEntry->transId);
if (pTrans == NULL) { if (pTrans == NULL) {
void* pKey = taosHashGetKey(pEntry, &keyLen); void* pKey = taosHashGetKey(pEntry, &keyLen);
// key is the name of src/dst db name // key is the name of src/dst db name
@ -60,46 +58,76 @@ int32_t clearFinishedTrans(SMnode* pMnode) {
} }
size_t num = taosArrayGetSize(pList); size_t num = taosArrayGetSize(pList);
for(int32_t i = 0; i < num; ++i) { for (int32_t i = 0; i < num; ++i) {
SKeyInfo* pKey = taosArrayGet(pList, i); SKeyInfo* pKey = taosArrayGet(pList, i);
taosHashRemove(execInfo.transMgmt.pDBTrans, pKey->pKey, pKey->keyLen); taosHashRemove(execInfo.transMgmt.pDBTrans, pKey->pKey, pKey->keyLen);
} }
mDebug("clear %d finished stream-trans, remained:%d", (int32_t) num, taosHashGetSize(execInfo.transMgmt.pDBTrans)); mDebug("clear %d finished stream-trans, remained:%d", (int32_t)num, taosHashGetSize(execInfo.transMgmt.pDBTrans));
taosThreadMutexUnlock(&execInfo.lock);
terrno = TSDB_CODE_SUCCESS; terrno = TSDB_CODE_SUCCESS;
taosArrayDestroy(pList); taosArrayDestroy(pList);
return 0; return 0;
} }
bool streamTransConflictOtherTrans(SMnode* pMnode, const char* pSrcDb, const char* pDstDb) { bool streamTransConflictOtherTrans(SMnode* pMnode, const char* pSrcDb, const char* pDstDb, bool lock) {
clearFinishedTrans(pMnode); if (lock) {
taosThreadMutexLock(&execInfo.lock);
}
taosThreadMutexLock(&execInfo.lock);
int32_t num = taosHashGetSize(execInfo.transMgmt.pDBTrans); int32_t num = taosHashGetSize(execInfo.transMgmt.pDBTrans);
if (num <= 0) { if (num <= 0) {
taosThreadMutexUnlock(&execInfo.lock); if (lock) {
taosThreadMutexUnlock(&execInfo.lock);
}
return false; return false;
} }
clearFinishedTrans(pMnode);
SStreamTransInfo *pEntry = taosHashGet(execInfo.transMgmt.pDBTrans, pSrcDb, strlen(pSrcDb)); SStreamTransInfo *pEntry = taosHashGet(execInfo.transMgmt.pDBTrans, pSrcDb, strlen(pSrcDb));
if (pEntry != NULL) { if (pEntry != NULL) {
taosThreadMutexUnlock(&execInfo.lock); if (lock) {
taosThreadMutexUnlock(&execInfo.lock);
}
mWarn("conflict with other transId:%d in Db:%s, trans:%s", pEntry->transId, pSrcDb, pEntry->name); mWarn("conflict with other transId:%d in Db:%s, trans:%s", pEntry->transId, pSrcDb, pEntry->name);
return true; return true;
} }
pEntry = taosHashGet(execInfo.transMgmt.pDBTrans, pDstDb, strlen(pDstDb)); pEntry = taosHashGet(execInfo.transMgmt.pDBTrans, pDstDb, strlen(pDstDb));
if (pEntry != NULL) { if (pEntry != NULL) {
taosThreadMutexUnlock(&execInfo.lock); if (lock) {
taosThreadMutexUnlock(&execInfo.lock);
}
mWarn("conflict with other transId:%d in Db:%s, trans:%s", pEntry->transId, pSrcDb, pEntry->name); mWarn("conflict with other transId:%d in Db:%s, trans:%s", pEntry->transId, pSrcDb, pEntry->name);
return true; return true;
} }
taosThreadMutexUnlock(&execInfo.lock); if (lock) {
taosThreadMutexUnlock(&execInfo.lock);
}
return false; return false;
} }
int32_t mndAddtoCheckpointWaitingList(SStreamObj* pStream, int64_t checkpointId) {
SCheckpointCandEntry* pEntry = taosHashGet(execInfo.transMgmt.pWaitingList, &pStream->uid, sizeof(pStream->uid));
if (pEntry == NULL) {
SCheckpointCandEntry entry = {.streamId = pStream->uid,
.checkpointTs = taosGetTimestampMs(),
.checkpointId = checkpointId,
.pName = taosStrdup(pStream->name)};
taosHashPut(execInfo.transMgmt.pWaitingList, &pStream->uid, sizeof(pStream->uid), &entry, sizeof(entry));
int32_t size = taosHashGetSize(execInfo.transMgmt.pWaitingList);
mDebug("stream:%" PRIx64 " add into waiting list due to conflict, ts:%" PRId64 " , checkpointId: %" PRId64
", total in waitingList:%d",
pStream->uid, entry.checkpointTs, checkpointId, size);
} else {
mDebug("stream:%" PRIx64 " ts:%" PRId64 ", checkpointId:%" PRId64 " already in waiting list, no need to add into",
pStream->uid, pEntry->checkpointTs, checkpointId);
}
return TSDB_CODE_SUCCESS;
}
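
The new bool lock parameter on streamTransConflictOtherTrans lets a caller that already holds execInfo.lock run the conflict check without re-acquiring the non-recursive mutex. A self-contained sketch of that conditional-locking shape; the plain pthread mutex and hasConflict helper below are stand-ins for the real lock and check:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t gLock = PTHREAD_MUTEX_INITIALIZER;
static int gConflicts = 0;  /* stand-in for the shared trans table */

/* Assumed shape: lock==true means "take the mutex here",
 * lock==false means "the caller is already inside the critical section". */
static bool hasConflict(bool lock) {
  if (lock) pthread_mutex_lock(&gLock);
  bool conflict = (gConflicts > 0);
  if (lock) pthread_mutex_unlock(&gLock);
  return conflict;
}

int main(void) {
  printf("%d\n", hasConflict(true));   /* standalone caller: lock internally */
  pthread_mutex_lock(&gLock);
  printf("%d\n", hasConflict(false));  /* caller already holds gLock, avoid self-deadlock */
  pthread_mutex_unlock(&gLock);
  return 0;
}
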

View File

@ -834,7 +834,7 @@ int32_t mndTransCheckConflict(SMnode *pMnode, STrans *pTrans) {
if (mndCheckTransConflict(pMnode, pTrans)) { if (mndCheckTransConflict(pMnode, pTrans)) {
terrno = TSDB_CODE_MND_TRANS_CONFLICT; terrno = TSDB_CODE_MND_TRANS_CONFLICT;
mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr()); mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
return -1; return terrno;
} }
return 0; return 0;

View File

@ -708,9 +708,8 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
int32_t numOfAlterViews = taosHashGetSize(pUser->alterViews); int32_t numOfAlterViews = taosHashGetSize(pUser->alterViews);
int32_t numOfTopics = taosHashGetSize(pUser->topics); int32_t numOfTopics = taosHashGetSize(pUser->topics);
int32_t numOfUseDbs = taosHashGetSize(pUser->useDbs); int32_t numOfUseDbs = taosHashGetSize(pUser->useDbs);
int32_t size = sizeof(SUserObj) + USER_RESERVE_SIZE + int32_t size = sizeof(SUserObj) + USER_RESERVE_SIZE + (numOfReadDbs + numOfWriteDbs) * TSDB_DB_FNAME_LEN +
(numOfReadDbs + numOfWriteDbs + numOfUseDbs) * TSDB_DB_FNAME_LEN + numOfTopics * TSDB_TOPIC_FNAME_LEN + numOfTopics * TSDB_TOPIC_FNAME_LEN + ipWhiteReserve;
ipWhiteReserve;
char *stb = taosHashIterate(pUser->readTbs, NULL); char *stb = taosHashIterate(pUser->readTbs, NULL);
while (stb != NULL) { while (stb != NULL) {
@ -720,7 +719,7 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
size += keyLen; size += keyLen;
size_t valueLen = 0; size_t valueLen = 0;
valueLen = strlen(stb); valueLen = strlen(stb) + 1;
size += sizeof(int32_t); size += sizeof(int32_t);
size += valueLen; size += valueLen;
stb = taosHashIterate(pUser->readTbs, stb); stb = taosHashIterate(pUser->readTbs, stb);
@ -734,7 +733,7 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
size += keyLen; size += keyLen;
size_t valueLen = 0; size_t valueLen = 0;
valueLen = strlen(stb); valueLen = strlen(stb) + 1;
size += sizeof(int32_t); size += sizeof(int32_t);
size += valueLen; size += valueLen;
stb = taosHashIterate(pUser->writeTbs, stb); stb = taosHashIterate(pUser->writeTbs, stb);
@ -748,7 +747,7 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
size += keyLen; size += keyLen;
size_t valueLen = 0; size_t valueLen = 0;
valueLen = strlen(stb); valueLen = strlen(stb) + 1;
size += sizeof(int32_t); size += sizeof(int32_t);
size += valueLen; size += valueLen;
stb = taosHashIterate(pUser->alterTbs, stb); stb = taosHashIterate(pUser->alterTbs, stb);
@ -762,7 +761,7 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
size += keyLen; size += keyLen;
size_t valueLen = 0; size_t valueLen = 0;
valueLen = strlen(stb); valueLen = strlen(stb) + 1;
size += sizeof(int32_t); size += sizeof(int32_t);
size += valueLen; size += valueLen;
stb = taosHashIterate(pUser->readViews, stb); stb = taosHashIterate(pUser->readViews, stb);
@ -776,7 +775,7 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
size += keyLen; size += keyLen;
size_t valueLen = 0; size_t valueLen = 0;
valueLen = strlen(stb); valueLen = strlen(stb) + 1;
size += sizeof(int32_t); size += sizeof(int32_t);
size += valueLen; size += valueLen;
stb = taosHashIterate(pUser->writeViews, stb); stb = taosHashIterate(pUser->writeViews, stb);
@ -790,11 +789,21 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
size += keyLen; size += keyLen;
size_t valueLen = 0; size_t valueLen = 0;
valueLen = strlen(stb); valueLen = strlen(stb) + 1;
size += sizeof(int32_t); size += sizeof(int32_t);
size += valueLen; size += valueLen;
stb = taosHashIterate(pUser->alterViews, stb); stb = taosHashIterate(pUser->alterViews, stb);
} }
int32_t *useDb = taosHashIterate(pUser->useDbs, NULL);
while (useDb != NULL) {
size_t keyLen = 0;
void *key = taosHashGetKey(useDb, &keyLen);
size += sizeof(int32_t);
size += keyLen;
size += sizeof(int32_t);
useDb = taosHashIterate(pUser->useDbs, useDb);
}
SSdbRaw *pRaw = sdbAllocRaw(SDB_USER, USER_VER_NUMBER, size); SSdbRaw *pRaw = sdbAllocRaw(SDB_USER, USER_VER_NUMBER, size);
if (pRaw == NULL) goto _OVER; if (pRaw == NULL) goto _OVER;
@ -925,7 +934,7 @@ SSdbRaw *mndUserActionEncode(SUserObj *pUser) {
stb = taosHashIterate(pUser->alterViews, stb); stb = taosHashIterate(pUser->alterViews, stb);
} }
int32_t *useDb = taosHashIterate(pUser->useDbs, NULL); useDb = taosHashIterate(pUser->useDbs, NULL);
while (useDb != NULL) { while (useDb != NULL) {
size_t keyLen = 0; size_t keyLen = 0;
void *key = taosHashGetKey(useDb, &keyLen); void *key = taosHashGetKey(useDb, &keyLen);
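
The size pass above now reserves strlen(value) + 1 bytes per table-privilege entry so the terminating NUL written later fits, and adds a loop that accounts for each useDbs entry (an int32 key length, the key bytes, and an int32 value). A minimal sketch of that per-entry bookkeeping, under the assumption that the raw row stores a length-prefixed key followed by a length-prefixed, NUL-terminated value; the sample key and condition string are hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  const char *key = "1.db1.stb1";   /* hypothetical table key */
  const char *val = "tag1 > 10";    /* hypothetical privilege condition */
  size_t entry = sizeof(int32_t) + strlen(key)
               + sizeof(int32_t) + strlen(val) + 1;  /* +1: terminating NUL, as in the hunk above */
  printf("bytes reserved for entry = %zu\n", entry);
  return 0;
}
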

View File

@ -149,7 +149,8 @@ typedef enum {
SDB_FUNC = 20, SDB_FUNC = 20,
SDB_IDX = 21, SDB_IDX = 21,
SDB_VIEW = 22, SDB_VIEW = 22,
SDB_MAX = 23 SDB_STREAM_SEQ = 23,
SDB_MAX = 24
} ESdbType; } ESdbType;
typedef struct SSdbRaw { typedef struct SSdbRaw {
@ -169,11 +170,11 @@ typedef struct SSdbRow {
} SSdbRow; } SSdbRow;
typedef struct SSdb { typedef struct SSdb {
SMnode *pMnode; SMnode * pMnode;
SWal *pWal; SWal * pWal;
int64_t sync; int64_t sync;
char *currDir; char * currDir;
char *tmpDir; char * tmpDir;
int64_t commitIndex; int64_t commitIndex;
int64_t commitTerm; int64_t commitTerm;
int64_t commitConfig; int64_t commitConfig;
@ -183,7 +184,7 @@ typedef struct SSdb {
int64_t tableVer[SDB_MAX]; int64_t tableVer[SDB_MAX];
int64_t maxId[SDB_MAX]; int64_t maxId[SDB_MAX];
EKeyType keyTypes[SDB_MAX]; EKeyType keyTypes[SDB_MAX];
SHashObj *hashObjs[SDB_MAX]; SHashObj * hashObjs[SDB_MAX];
TdThreadRwlock locks[SDB_MAX]; TdThreadRwlock locks[SDB_MAX];
SdbInsertFp insertFps[SDB_MAX]; SdbInsertFp insertFps[SDB_MAX];
SdbUpdateFp updateFps[SDB_MAX]; SdbUpdateFp updateFps[SDB_MAX];
@ -198,25 +199,25 @@ typedef struct SSdb {
typedef struct SSdbIter { typedef struct SSdbIter {
TdFilePtr file; TdFilePtr file;
int64_t total; int64_t total;
char *name; char * name;
} SSdbIter; } SSdbIter;
typedef struct { typedef struct {
ESdbType sdbType; ESdbType sdbType;
EKeyType keyType; EKeyType keyType;
SdbDeployFp deployFp; SdbDeployFp deployFp;
SdbEncodeFp encodeFp; SdbEncodeFp encodeFp;
SdbDecodeFp decodeFp; SdbDecodeFp decodeFp;
SdbInsertFp insertFp; SdbInsertFp insertFp;
SdbUpdateFp updateFp; SdbUpdateFp updateFp;
SdbDeleteFp deleteFp; SdbDeleteFp deleteFp;
SdbValidateFp validateFp; SdbValidateFp validateFp;
} SSdbTable; } SSdbTable;
typedef struct SSdbOpt { typedef struct SSdbOpt {
const char *path; const char *path;
SMnode *pMnode; SMnode * pMnode;
SWal *pWal; SWal * pWal;
int64_t sync; int64_t sync;
} SSdbOpt; } SSdbOpt;
@ -393,7 +394,7 @@ int32_t sdbGetRawSoftVer(SSdbRaw *pRaw, int8_t *sver);
int32_t sdbGetRawTotalSize(SSdbRaw *pRaw); int32_t sdbGetRawTotalSize(SSdbRaw *pRaw);
SSdbRow *sdbAllocRow(int32_t objSize); SSdbRow *sdbAllocRow(int32_t objSize);
void *sdbGetRowObj(SSdbRow *pRow); void * sdbGetRowObj(SSdbRow *pRow);
void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc); void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc);
int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter, int64_t *index, int64_t *term, int64_t *config); int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter, int64_t *index, int64_t *term, int64_t *config);

View File

@ -13,31 +13,31 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include "rsync.h"
#include "executor.h" #include "executor.h"
#include "rsync.h"
#include "sndInt.h" #include "sndInt.h"
#include "tqCommon.h" #include "tqCommon.h"
#include "tuuid.h" #include "tuuid.h"
#define sndError(...) \ #define sndError(...) \
do { \ do { \
if (sndDebugFlag & DEBUG_ERROR) { \ if (sndDebugFlag & DEBUG_ERROR) { \
taosPrintLog("SND ERROR ", DEBUG_ERROR, sndDebugFlag, __VA_ARGS__); \ taosPrintLog("SND ERROR ", DEBUG_ERROR, sndDebugFlag, __VA_ARGS__); \
} \ } \
} while (0) } while (0)
#define sndInfo(...) \ #define sndInfo(...) \
do { \ do { \
if (sndDebugFlag & DEBUG_INFO) { \ if (sndDebugFlag & DEBUG_INFO) { \
taosPrintLog("SND INFO ", DEBUG_INFO, sndDebugFlag, __VA_ARGS__); \ taosPrintLog("SND INFO ", DEBUG_INFO, sndDebugFlag, __VA_ARGS__); \
} \ } \
} while (0) } while (0)
#define sndDebug(...) \ #define sndDebug(...) \
do { \ do { \
if (sndDebugFlag & DEBUG_DEBUG) { \ if (sndDebugFlag & DEBUG_DEBUG) { \
taosPrintLog("SND ", DEBUG_DEBUG, sndDebugFlag, __VA_ARGS__); \ taosPrintLog("SND ", DEBUG_DEBUG, sndDebugFlag, __VA_ARGS__); \
} \ } \
} while (0) } while (0)
int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProcessVer) { int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProcessVer) {
@ -46,10 +46,11 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProcessVer
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
return code; return code;
} }
pTask->pBackend = NULL;
streamTaskOpenAllUpstreamInput(pTask); streamTaskOpenAllUpstreamInput(pTask);
SStreamTask* pSateTask = pTask; SStreamTask *pSateTask = pTask;
SStreamTask task = {0}; SStreamTask task = {0};
if (pTask->info.fillHistory) { if (pTask->info.fillHistory) {
task.id.streamId = pTask->streamTaskId.streamId; task.id.streamId = pTask->streamTaskId.streamId;
@ -84,7 +85,7 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProcessVer
streamTaskResetUpstreamStageInfo(pTask); streamTaskResetUpstreamStageInfo(pTask);
streamSetupScheduleTrigger(pTask); streamSetupScheduleTrigger(pTask);
SCheckpointInfo* pChkInfo = &pTask->chkInfo; SCheckpointInfo *pChkInfo = &pTask->chkInfo;
// checkpoint ver is the kept version, handled data should be the next version. // checkpoint ver is the kept version, handled data should be the next version.
if (pTask->chkInfo.checkpointId != 0) { if (pTask->chkInfo.checkpointId != 0) {
pTask->chkInfo.nextProcessVer = pTask->chkInfo.checkpointVer + 1; pTask->chkInfo.nextProcessVer = pTask->chkInfo.checkpointVer + 1;
@ -92,7 +93,7 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProcessVer
pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer); pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer);
} }
char* p = NULL; char *p = NULL;
streamTaskGetStatus(pTask, &p); streamTaskGetStatus(pTask, &p);
if (pTask->info.fillHistory) { if (pTask->info.fillHistory) {
@ -194,7 +195,7 @@ int32_t sndProcessStreamMsg(SSnode *pSnode, SRpcMsg *pMsg) {
int32_t sndProcessWriteMsg(SSnode *pSnode, SRpcMsg *pMsg, SRpcMsg *pRsp) { int32_t sndProcessWriteMsg(SSnode *pSnode, SRpcMsg *pMsg, SRpcMsg *pRsp) {
switch (pMsg->msgType) { switch (pMsg->msgType) {
case TDMT_STREAM_TASK_DEPLOY: { case TDMT_STREAM_TASK_DEPLOY: {
void *pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); void * pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
int32_t len = pMsg->contLen - sizeof(SMsgHead); int32_t len = pMsg->contLen - sizeof(SMsgHead);
return tqStreamTaskProcessDeployReq(pSnode->pMeta, -1, pReq, len, true, true); return tqStreamTaskProcessDeployReq(pSnode->pMeta, -1, pReq, len, true, true);
} }

View File

@ -14,6 +14,8 @@ set(
"src/vnd/vnodeSnapshot.c" "src/vnd/vnodeSnapshot.c"
"src/vnd/vnodeRetention.c" "src/vnd/vnodeRetention.c"
"src/vnd/vnodeInitApi.c" "src/vnd/vnodeInitApi.c"
"src/vnd/vnodeAsync.c"
"src/vnd/vnodeHash.c"
# meta # meta
"src/meta/metaOpen.c" "src/meta/metaOpen.c"

View File

@ -309,7 +309,12 @@ int32_t tsdbTakeReadSnap2(STsdbReader *pReader, _query_reseek_func_t reseek, STs
void tsdbUntakeReadSnap2(STsdbReader *pReader, STsdbReadSnap *pSnap, bool proactive); void tsdbUntakeReadSnap2(STsdbReader *pReader, STsdbReadSnap *pSnap, bool proactive);
// tsdbMerge.c ============================================================================================== // tsdbMerge.c ==============================================================================================
int32_t tsdbSchedMerge(STsdb *tsdb, int32_t fid); typedef struct {
STsdb *tsdb;
int32_t fid;
} SMergeArg;
int32_t tsdbMerge(void *arg);
// tsdbDiskData ============================================================================================== // tsdbDiskData ==============================================================================================
int32_t tDiskDataBuilderCreate(SDiskDataBuilder **ppBuilder); int32_t tDiskDataBuilderCreate(SDiskDataBuilder **ppBuilder);
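
With tsdbSchedMerge replaced by SMergeArg plus tsdbMerge(void *arg), the merge entry point now has the generic int32_t (*execute)(void*) shape that an async scheduler can call, with the tsdb handle and fid packed into one heap-allocated argument. A standalone sketch of that packaging; MergeArg, mergeExecute, and the direct invocation are simplified stand-ins for the real flow:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in; the real SMergeArg carries an STsdb* and the fid. */
typedef struct { const char *tsdbName; int32_t fid; } MergeArg;

/* Matches the int32_t (*execute)(void*) callback shape. */
static int32_t mergeExecute(void *arg) {
  MergeArg *p = (MergeArg *)arg;
  printf("merging fid %d of %s\n", p->fid, p->tsdbName);
  free(p);  /* the task owns its argument */
  return 0;
}

int main(void) {
  MergeArg *arg = (MergeArg *)malloc(sizeof(*arg));
  arg->tsdbName = "vnode2/tsdb";
  arg->fid = 42;
  /* A real caller would hand mergeExecute/arg to the async scheduler;
   * calling it directly keeps the sketch runnable. */
  return mergeExecute(arg);
}
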

View File

@ -48,9 +48,32 @@ int32_t vnodeCheckCfg(const SVnodeCfg*);
int32_t vnodeEncodeConfig(const void* pObj, SJson* pJson); int32_t vnodeEncodeConfig(const void* pObj, SJson* pJson);
int32_t vnodeDecodeConfig(const SJson* pJson, void* pObj); int32_t vnodeDecodeConfig(const SJson* pJson, void* pObj);
// vnodeAsync.c
typedef struct SVAsync SVAsync;
typedef enum {
EVA_PRIORITY_HIGH = 0,
EVA_PRIORITY_NORMAL,
EVA_PRIORITY_LOW,
} EVAPriority;
#define VNODE_ASYNC_VALID_CHANNEL_ID(channelId) ((channelId) > 0)
#define VNODE_ASYNC_VALID_TASK_ID(taskId) ((taskId) > 0)
int32_t vnodeAsyncInit(SVAsync** async, char* label);
int32_t vnodeAsyncDestroy(SVAsync** async);
int32_t vnodeAChannelInit(SVAsync* async, int64_t* channelId);
int32_t vnodeAChannelDestroy(SVAsync* async, int64_t channelId, bool waitRunning);
int32_t vnodeAsync(SVAsync* async, EVAPriority priority, int32_t (*execute)(void*), void (*complete)(void*), void* arg,
int64_t* taskId);
int32_t vnodeAsyncC(SVAsync* async, int64_t channelId, EVAPriority priority, int32_t (*execute)(void*),
void (*complete)(void*), void* arg, int64_t* taskId);
int32_t vnodeAWait(SVAsync* async, int64_t taskId);
int32_t vnodeACancel(SVAsync* async, int64_t taskId);
int32_t vnodeAsyncSetWorkers(SVAsync* async, int32_t numWorkers);
// vnodeModule.c // vnodeModule.c
int vnodeScheduleTask(int (*execute)(void*), void* arg); extern SVAsync* vnodeAsyncHandle[2];
int vnodeScheduleTaskEx(int tpid, int (*execute)(void*), void* arg);
// vnodeBufPool.c // vnodeBufPool.c
typedef struct SVBufPoolNode SVBufPoolNode; typedef struct SVBufPoolNode SVBufPoolNode;
@ -110,7 +133,7 @@ int32_t vnodeAsyncCommit(SVnode* pVnode);
bool vnodeShouldRollback(SVnode* pVnode); bool vnodeShouldRollback(SVnode* pVnode);
// vnodeSync.c // vnodeSync.c
int32_t vnodeSyncOpen(SVnode *pVnode, char *path, int32_t vnodeVersion); int32_t vnodeSyncOpen(SVnode* pVnode, char* path, int32_t vnodeVersion);
int32_t vnodeSyncStart(SVnode* pVnode); int32_t vnodeSyncStart(SVnode* pVnode);
void vnodeSyncPreClose(SVnode* pVnode); void vnodeSyncPreClose(SVnode* pVnode);
void vnodeSyncPostClose(SVnode* pVnode); void vnodeSyncPostClose(SVnode* pVnode);
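
The new vnodeAsync declarations above outline a small task scheduler: a handle from vnodeAsyncInit, optional per-channel serialization via vnodeAChannelInit, priority-tagged tasks submitted with vnodeAsync/vnodeAsyncC, and completion tracked by task id through vnodeAWait/vnodeACancel. A hedged usage sketch of that call sequence, assuming the semantics implied by the names; it is not standalone and would need to link against the new vnodeAsync.c, and the header name in the include is an assumption:

#include <stdbool.h>
#include "vnd.h"  /* assumption: the vnode-internal header carrying the SVAsync prototypes */

static int32_t doWork(void *arg) { (void)arg; return 0; }  /* execute callback */
static void    onDone(void *arg) { (void)arg; }            /* completion callback */

int32_t exampleCommitLikeFlow(void) {
  SVAsync *async = NULL;
  int64_t  channelId = 0, taskId = 0;
  int32_t  code = vnodeAsyncInit(&async, "vnode-commit");
  if (code != 0) return code;

  /* one channel so tasks submitted to it run in order (assumed behavior) */
  code = vnodeAChannelInit(async, &channelId);
  if (code == 0) {
    code = vnodeAsyncC(async, channelId, EVA_PRIORITY_HIGH, doWork, onDone, NULL, &taskId);
  }
  if (code == 0) {
    code = vnodeAWait(async, taskId);  /* assumed to block until the task finishes */
  }

  vnodeAChannelDestroy(async, channelId, true /* wait for a running task */);
  vnodeAsyncDestroy(&async);
  return code;
}
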

View File

@ -93,7 +93,11 @@ typedef struct SQueryNode SQueryNode;
#define VNODE_RSMA2_DIR "rsma2" #define VNODE_RSMA2_DIR "rsma2"
#define VNODE_TQ_STREAM "stream" #define VNODE_TQ_STREAM "stream"
#if SUSPEND_RESUME_TEST // only for test purpose
#define VNODE_BUFPOOL_SEGMENTS 1
#else
#define VNODE_BUFPOOL_SEGMENTS 3 #define VNODE_BUFPOOL_SEGMENTS 3
#endif
#define VND_INFO_FNAME "vnode.json" #define VND_INFO_FNAME "vnode.json"
#define VND_INFO_FNAME_TMP "vnode_tmp.json" #define VND_INFO_FNAME_TMP "vnode_tmp.json"
@ -209,7 +213,7 @@ int32_t tsdbBegin(STsdb* pTsdb);
// int32_t tsdbCommit(STsdb* pTsdb, SCommitInfo* pInfo); // int32_t tsdbCommit(STsdb* pTsdb, SCommitInfo* pInfo);
int32_t tsdbCacheCommit(STsdb* pTsdb); int32_t tsdbCacheCommit(STsdb* pTsdb);
int32_t tsdbCompact(STsdb* pTsdb, SCompactInfo* pInfo); int32_t tsdbCompact(STsdb* pTsdb, SCompactInfo* pInfo);
int32_t tsdbRetention(STsdb *tsdb, int64_t now, int32_t sync); int32_t tsdbRetention(STsdb* tsdb, int64_t now, int32_t sync);
// int32_t tsdbFinishCommit(STsdb* pTsdb); // int32_t tsdbFinishCommit(STsdb* pTsdb);
// int32_t tsdbRollbackCommit(STsdb* pTsdb); // int32_t tsdbRollbackCommit(STsdb* pTsdb);
int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, SSubmitReq2* pMsg); int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, SSubmitReq2* pMsg);
@ -448,13 +452,16 @@ struct SVnode {
SVBufPool* recycleTail; SVBufPool* recycleTail;
SVBufPool* onRecycle; SVBufPool* onRecycle;
// commit variables
int64_t commitChannel;
int64_t commitTask;
SMeta* pMeta; SMeta* pMeta;
SSma* pSma; SSma* pSma;
STsdb* pTsdb; STsdb* pTsdb;
SWal* pWal; SWal* pWal;
STQ* pTq; STQ* pTq;
SSink* pSink; SSink* pSink;
tsem_t canCommit;
int64_t sync; int64_t sync;
TdThreadMutex lock; TdThreadMutex lock;
bool blocked; bool blocked;
@ -494,18 +501,18 @@ struct SSma {
void* pRSmaEnv; void* pRSmaEnv;
}; };
#define SMA_CFG(s) (&(s)->pVnode->config) #define SMA_CFG(s) (&(s)->pVnode->config)
#define SMA_TSDB_CFG(s) (&(s)->pVnode->config.tsdbCfg) #define SMA_TSDB_CFG(s) (&(s)->pVnode->config.tsdbCfg)
#define SMA_RETENTION(s) ((SRetention*)&(s)->pVnode->config.tsdbCfg.retentions) #define SMA_RETENTION(s) ((SRetention*)&(s)->pVnode->config.tsdbCfg.retentions)
#define SMA_LOCKED(s) ((s)->locked) #define SMA_LOCKED(s) ((s)->locked)
#define SMA_META(s) ((s)->pVnode->pMeta) #define SMA_META(s) ((s)->pVnode->pMeta)
#define SMA_VID(s) TD_VID((s)->pVnode) #define SMA_VID(s) TD_VID((s)->pVnode)
#define SMA_TFS(s) ((s)->pVnode->pTfs) #define SMA_TFS(s) ((s)->pVnode->pTfs)
#define SMA_TSMA_ENV(s) ((s)->pTSmaEnv) #define SMA_TSMA_ENV(s) ((s)->pTSmaEnv)
#define SMA_RSMA_ENV(s) ((s)->pRSmaEnv) #define SMA_RSMA_ENV(s) ((s)->pRSmaEnv)
#define SMA_RSMA_TSDB0(s) ((s)->pVnode->pTsdb) #define SMA_RSMA_TSDB0(s) ((s)->pVnode->pTsdb)
#define SMA_RSMA_TSDB1(s) ((s)->pRSmaTsdb[TSDB_RETENTION_L0]) #define SMA_RSMA_TSDB1(s) ((s)->pRSmaTsdb[TSDB_RETENTION_L0])
#define SMA_RSMA_TSDB2(s) ((s)->pRSmaTsdb[TSDB_RETENTION_L1]) #define SMA_RSMA_TSDB2(s) ((s)->pRSmaTsdb[TSDB_RETENTION_L1])
#define SMA_RSMA_GET_TSDB(pVnode, level) ((level == 0) ? pVnode->pTsdb : pVnode->pSma->pRSmaTsdb[level - 1]) #define SMA_RSMA_GET_TSDB(pVnode, level) ((level == 0) ? pVnode->pTsdb : pVnode->pSma->pRSmaTsdb[level - 1])
// sma // sma

View File

@ -61,7 +61,7 @@ struct SRSmaQTaskInfoItem {
int32_t len; int32_t len;
int8_t type; int8_t type;
int64_t suid; int64_t suid;
void *qTaskInfo; void * qTaskInfo;
}; };
static void tdRSmaQTaskInfoFree(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level) { static void tdRSmaQTaskInfoFree(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level) {
@ -185,7 +185,7 @@ int32_t tdUpdateTbUidList(SSma *pSma, STbUidStore *pStore, bool isAdd) {
void *pIter = NULL; void *pIter = NULL;
while ((pIter = taosHashIterate(pStore->uidHash, pIter))) { while ((pIter = taosHashIterate(pStore->uidHash, pIter))) {
tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
SArray *pTbUids = *(SArray **)pIter; SArray * pTbUids = *(SArray **)pIter;
if (tdUpdateTbUidListImpl(pSma, pTbSuid, pTbUids, isAdd) != TSDB_CODE_SUCCESS) { if (tdUpdateTbUidListImpl(pSma, pTbSuid, pTbUids, isAdd) != TSDB_CODE_SUCCESS) {
taosHashCancelIterate(pStore->uidHash, pIter); taosHashCancelIterate(pStore->uidHash, pIter);
@ -213,7 +213,7 @@ int32_t tdFetchTbUidList(SSma *pSma, STbUidStore **ppStore, tb_uid_t suid, tb_ui
} }
SRSmaStat *pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv); SRSmaStat *pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
SHashObj *infoHash = NULL; SHashObj * infoHash = NULL;
if (!pStat || !(infoHash = RSMA_INFO_HASH(pStat))) { if (!pStat || !(infoHash = RSMA_INFO_HASH(pStat))) {
terrno = TSDB_CODE_RSMA_INVALID_STAT; terrno = TSDB_CODE_RSMA_INVALID_STAT;
return TSDB_CODE_FAILED; return TSDB_CODE_FAILED;
@ -264,11 +264,11 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
int8_t idx) { int8_t idx) {
if ((param->qmsgLen > 0) && param->qmsg[idx]) { if ((param->qmsgLen > 0) && param->qmsg[idx]) {
SRSmaInfoItem *pItem = &(pRSmaInfo->items[idx]); SRSmaInfoItem *pItem = &(pRSmaInfo->items[idx]);
SRetention *pRetention = SMA_RETENTION(pSma); SRetention * pRetention = SMA_RETENTION(pSma);
STsdbCfg *pTsdbCfg = SMA_TSDB_CFG(pSma); STsdbCfg * pTsdbCfg = SMA_TSDB_CFG(pSma);
SVnode *pVnode = pSma->pVnode; SVnode * pVnode = pSma->pVnode;
char taskInfDir[TSDB_FILENAME_LEN] = {0}; char taskInfDir[TSDB_FILENAME_LEN] = {0};
void *pStreamState = NULL; void * pStreamState = NULL;
// set the backend of stream state // set the backend of stream state
tdRSmaQTaskInfoGetFullPath(pVnode, pRSmaInfo->suid, idx + 1, pVnode->pTfs, taskInfDir); tdRSmaQTaskInfoGetFullPath(pVnode, pRSmaInfo->suid, idx + 1, pVnode->pTfs, taskInfDir);
@ -297,6 +297,8 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
sprintf(pStreamTask->exec.qmsg, "%s", RSMA_EXEC_TASK_FLAG); sprintf(pStreamTask->exec.qmsg, "%s", RSMA_EXEC_TASK_FLAG);
pStreamTask->chkInfo.checkpointId = streamMetaGetLatestCheckpointId(pStreamTask->pMeta); pStreamTask->chkInfo.checkpointId = streamMetaGetLatestCheckpointId(pStreamTask->pMeta);
tdRSmaTaskInit(pStreamTask->pMeta, pItem, &pStreamTask->id); tdRSmaTaskInit(pStreamTask->pMeta, pItem, &pStreamTask->id);
pStreamTask->status.pSM = streamCreateStateMachine(pStreamTask);
pStreamState = streamStateOpen(taskInfDir, pStreamTask, true, -1, -1); pStreamState = streamStateOpen(taskInfDir, pStreamTask, true, -1, -1);
if (!pStreamState) { if (!pStreamState) {
terrno = TSDB_CODE_RSMA_STREAM_STATE_OPEN; terrno = TSDB_CODE_RSMA_STREAM_STATE_OPEN;
@ -372,7 +374,7 @@ int32_t tdRSmaProcessCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con
} }
#endif #endif
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); SSmaEnv * pEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv); SRSmaStat *pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
SRSmaInfo *pRSmaInfo = NULL; SRSmaInfo *pRSmaInfo = NULL;
@ -651,9 +653,7 @@ static int32_t tdRSmaProcessDelReq(SSma *pSma, int64_t suid, int8_t level, SBatc
((SMsgHead *)pBuf)->vgId = TD_VID(pSma->pVnode); ((SMsgHead *)pBuf)->vgId = TD_VID(pSma->pVnode);
SRpcMsg delMsg = {.msgType = TDMT_VND_BATCH_DEL, SRpcMsg delMsg = {.msgType = TDMT_VND_BATCH_DEL, .pCont = pBuf, .contLen = len + sizeof(SMsgHead)};
.pCont = pBuf,
.contLen = len + sizeof(SMsgHead)};
code = tmsgPutToQueue(&pSma->pVnode->msgCb, WRITE_QUEUE, &delMsg); code = tmsgPutToQueue(&pSma->pVnode->msgCb, WRITE_QUEUE, &delMsg);
TSDB_CHECK_CODE(code, lino, _exit); TSDB_CHECK_CODE(code, lino, _exit);
} }
@ -673,8 +673,8 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma
int32_t code = 0; int32_t code = 0;
int32_t lino = 0; int32_t lino = 0;
SSDataBlock *output = NULL; SSDataBlock *output = NULL;
SArray *pResList = pItem->pResList; SArray * pResList = pItem->pResList;
STSchema *pTSchema = pInfo->pTSchema; STSchema * pTSchema = pInfo->pTSchema;
int64_t suid = pInfo->suid; int64_t suid = pInfo->suid;
while (1) { while (1) {
@ -733,7 +733,7 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma
} }
} }
STsdb *sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]); STsdb * sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]);
SSubmitReq2 *pReq = NULL; SSubmitReq2 *pReq = NULL;
if (buildSubmitReqFromDataBlock(&pReq, output, pTSchema, output->info.id.groupId, SMA_VID(pSma), suid) < 0) { if (buildSubmitReqFromDataBlock(&pReq, output, pTSchema, output->info.id.groupId, SMA_VID(pSma), suid) < 0) {
@ -795,7 +795,7 @@ _exit:
static int32_t tdExecuteRSmaImplAsync(SSma *pSma, int64_t version, const void *pMsg, int32_t len, int32_t inputType, static int32_t tdExecuteRSmaImplAsync(SSma *pSma, int64_t version, const void *pMsg, int32_t len, int32_t inputType,
SRSmaInfo *pInfo, tb_uid_t suid) { SRSmaInfo *pInfo, tb_uid_t suid) {
int32_t size = RSMA_EXEC_MSG_HLEN + len; // header + payload int32_t size = RSMA_EXEC_MSG_HLEN + len; // header + payload
void *qItem = taosAllocateQitem(size, DEF_QITEM, 0); void * qItem = taosAllocateQitem(size, DEF_QITEM, 0);
if (!qItem) { if (!qItem) {
return TSDB_CODE_FAILED; return TSDB_CODE_FAILED;
@ -870,10 +870,10 @@ static int32_t tdRsmaPrintSubmitReq(SSma *pSma, SSubmitReq *pReq) {
* @param level * @param level
* @return int32_t * @return int32_t
*/ */
static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize, int64_t version, int32_t inputType, SRSmaInfo *pInfo, static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize, int64_t version, int32_t inputType,
ERsmaExecType type, int8_t level) { SRSmaInfo *pInfo, ERsmaExecType type, int8_t level) {
int32_t idx = level - 1; int32_t idx = level - 1;
void *qTaskInfo = RSMA_INFO_QTASK(pInfo, idx); void * qTaskInfo = RSMA_INFO_QTASK(pInfo, idx);
SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, idx); SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, idx);
if (!qTaskInfo) { if (!qTaskInfo) {
@ -887,8 +887,9 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize,
return TSDB_CODE_FAILED; return TSDB_CODE_FAILED;
} }
smaDebug("vgId:%d, execute rsma %" PRIi8 " task for qTaskInfo:%p, suid:%" PRIu64 ", nMsg:%d, submitReqVer:%" PRIi64 ", inputType:%d", SMA_VID(pSma), level, smaDebug("vgId:%d, execute rsma %" PRIi8 " task for qTaskInfo:%p, suid:%" PRIu64 ", nMsg:%d, submitReqVer:%" PRIi64
RSMA_INFO_QTASK(pInfo, idx), pInfo->suid, msgSize, version, inputType); ", inputType:%d",
SMA_VID(pSma), level, RSMA_INFO_QTASK(pInfo, idx), pInfo->suid, msgSize, version, inputType);
if ((terrno = qSetSMAInput(qTaskInfo, pMsg, msgSize, inputType)) < 0) { if ((terrno = qSetSMAInput(qTaskInfo, pMsg, msgSize, inputType)) < 0) {
smaError("vgId:%d, rsma %" PRIi8 " qSetStreamInput failed since %s", SMA_VID(pSma), level, tstrerror(terrno)); smaError("vgId:%d, rsma %" PRIi8 " qSetStreamInput failed since %s", SMA_VID(pSma), level, tstrerror(terrno));
@ -912,7 +913,7 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize,
static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) { static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) {
int32_t code = 0; int32_t code = 0;
int32_t lino = 0; int32_t lino = 0;
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); SSmaEnv * pEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pStat = NULL; SRSmaStat *pStat = NULL;
SRSmaInfo *pRSmaInfo = NULL; SRSmaInfo *pRSmaInfo = NULL;
@ -1067,8 +1068,8 @@ _err:
static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) { static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) {
int32_t code = 0; int32_t code = 0;
int32_t lino = 0; int32_t lino = 0;
SVnode *pVnode = pSma->pVnode; SVnode * pVnode = pSma->pVnode;
SArray *suidList = NULL; SArray * suidList = NULL;
STbUidStore uidStore = {0}; STbUidStore uidStore = {0};
SMetaReader mr = {0}; SMetaReader mr = {0};
tb_uid_t suid = 0; tb_uid_t suid = 0;
@ -1196,7 +1197,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
int32_t code = 0; int32_t code = 0;
int32_t lino = 0; int32_t lino = 0;
int32_t nTaskInfo = 0; int32_t nTaskInfo = 0;
SSma *pSma = pRSmaStat->pSma; SSma * pSma = pRSmaStat->pSma;
SVnode *pVnode = pSma->pVnode; SVnode *pVnode = pSma->pVnode;
if (taosHashGetSize(pInfoHash) <= 0) { if (taosHashGetSize(pInfoHash) <= 0) {
@ -1229,7 +1230,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
do { do {
int32_t nStreamFlushed = 0; int32_t nStreamFlushed = 0;
int32_t nSleep = 0; int32_t nSleep = 0;
void *infoHash = NULL; void * infoHash = NULL;
while (true) { while (true) {
while ((infoHash = taosHashIterate(pInfoHash, infoHash))) { while ((infoHash = taosHashIterate(pInfoHash, infoHash))) {
SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)infoHash; SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)infoHash;
@ -1271,7 +1272,7 @@ _checkpoint:
SStreamMeta *pMeta = NULL; SStreamMeta *pMeta = NULL;
int64_t checkpointId = taosGetTimestampNs(); int64_t checkpointId = taosGetTimestampNs();
bool checkpointBuilt = false; bool checkpointBuilt = false;
void *infoHash = NULL; void * infoHash = NULL;
while ((infoHash = taosHashIterate(pInfoHash, infoHash))) { while ((infoHash = taosHashIterate(pInfoHash, infoHash))) {
SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)infoHash; SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)infoHash;
if (RSMA_INFO_IS_DEL(pRSmaInfo)) { if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
@ -1282,11 +1283,12 @@ _checkpoint:
SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pRSmaInfo, i); SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pRSmaInfo, i);
if (pItem && pItem->pStreamTask) { if (pItem && pItem->pStreamTask) {
SStreamTask *pTask = pItem->pStreamTask; SStreamTask *pTask = pItem->pStreamTask;
atomic_store_32(&pTask->pMeta->chkptNotReadyTasks, 1); // atomic_store_32(&pTask->pMeta->chkptNotReadyTasks, 1);
pTask->checkpointingId = checkpointId; pTask->chkInfo.checkpointingId = checkpointId;
pTask->chkInfo.checkpointId = pTask->checkpointingId; pTask->chkInfo.checkpointId = checkpointId;  // pTask->checkpointingId;
pTask->chkInfo.checkpointVer = pItem->submitReqVer; pTask->chkInfo.checkpointVer = pItem->submitReqVer;
pTask->info.triggerParam = pItem->fetchResultVer; pTask->info.triggerParam = pItem->fetchResultVer;
pTask->info.taskLevel = TASK_LEVEL_SMA;
if (!checkpointBuilt) { if (!checkpointBuilt) {
// the stream states share one checkpoint // the stream states share one checkpoint
@ -1342,10 +1344,10 @@ _exit:
* @param tmrId * @param tmrId
*/ */
static void tdRSmaFetchTrigger(void *param, void *tmrId) { static void tdRSmaFetchTrigger(void *param, void *tmrId) {
SRSmaRef *pRSmaRef = NULL; SRSmaRef * pRSmaRef = NULL;
SSma *pSma = NULL; SSma * pSma = NULL;
SRSmaStat *pStat = NULL; SRSmaStat * pStat = NULL;
SRSmaInfo *pRSmaInfo = NULL; SRSmaInfo * pRSmaInfo = NULL;
SRSmaInfoItem *pItem = NULL; SRSmaInfoItem *pItem = NULL;
if (!(pRSmaRef = taosHashGet(smaMgmt.refHash, &param, POINTER_BYTES))) { if (!(pRSmaRef = taosHashGet(smaMgmt.refHash, &param, POINTER_BYTES))) {
@ -1513,7 +1515,7 @@ _err:
} }
static int32_t tdRSmaBatchExec(SSma *pSma, SRSmaInfo *pInfo, STaosQall *qall, SArray *pSubmitArr, ERsmaExecType type) { static int32_t tdRSmaBatchExec(SSma *pSma, SRSmaInfo *pInfo, STaosQall *qall, SArray *pSubmitArr, ERsmaExecType type) {
void *msg = NULL; void * msg = NULL;
int8_t resume = 0; int8_t resume = 0;
int32_t nSubmit = 0; int32_t nSubmit = 0;
int32_t nDelete = 0; int32_t nDelete = 0;
@ -1628,11 +1630,11 @@ _err:
int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) { int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
int32_t code = 0; int32_t code = 0;
int32_t lino = 0; int32_t lino = 0;
SVnode *pVnode = pSma->pVnode; SVnode * pVnode = pSma->pVnode;
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); SSmaEnv * pEnv = SMA_RSMA_ENV(pSma);
SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pEnv); SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
SHashObj *infoHash = NULL; SHashObj * infoHash = NULL;
SArray *pSubmitArr = NULL; SArray * pSubmitArr = NULL;
bool isFetchAll = false; bool isFetchAll = false;
if (!pRSmaStat || !(infoHash = RSMA_INFO_HASH(pRSmaStat))) { if (!pRSmaStat || !(infoHash = RSMA_INFO_HASH(pRSmaStat))) {
@ -1731,4 +1733,4 @@ _exit:
smaError("vgId:%d, %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code)); smaError("vgId:%d, %s failed at line %d since %s", TD_VID(pVnode), __func__, lino, tstrerror(code));
} }
return code; return code;
} }

View File

@ -750,21 +750,27 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
SStreamTask* pStateTask = pTask; SStreamTask* pStateTask = pTask;
SStreamTask task = {0};
STaskId taskId = {.streamId = 0, .taskId = 0};
if (pTask->info.fillHistory) { if (pTask->info.fillHistory) {
task.id.streamId = pTask->streamTaskId.streamId; taskId.streamId = pTask->id.streamId;
task.id.taskId = pTask->streamTaskId.taskId; taskId.taskId = pTask->id.taskId;
task.pMeta = pTask->pMeta;
pStateTask = &task; pTask->id.streamId = pTask->streamTaskId.streamId;
pTask->id.taskId = pTask->streamTaskId.taskId;
} }
pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pStateTask, false, -1, -1); pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1);
if (pTask->pState == NULL) { if (pTask->pState == NULL) {
tqError("s-task:%s (vgId:%d) failed to open state for task", pTask->id.idStr, vgId); tqError("s-task:%s (vgId:%d) failed to open state for task", pTask->id.idStr, vgId);
return -1; return -1;
} else { } else {
tqDebug("s-task:%s state:%p", pTask->id.idStr, pTask->pState); tqDebug("s-task:%s state:%p", pTask->id.idStr, pTask->pState);
} }
if (pTask->info.fillHistory) {
pTask->id.streamId = taskId.streamId;
pTask->id.taskId = taskId.taskId;
}
SReadHandle handle = { SReadHandle handle = {
.checkpointId = pTask->chkInfo.checkpointId, .checkpointId = pTask->chkInfo.checkpointId,
@ -785,15 +791,17 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
qSetTaskId(pTask->exec.pExecutor, pTask->id.taskId, pTask->id.streamId); qSetTaskId(pTask->exec.pExecutor, pTask->id.taskId, pTask->id.streamId);
} else if (pTask->info.taskLevel == TASK_LEVEL__AGG) { } else if (pTask->info.taskLevel == TASK_LEVEL__AGG) {
SStreamTask* pSateTask = pTask; SStreamTask* pSateTask = pTask;
SStreamTask task = {0}; // SStreamTask task = {0};
STaskId taskId = {.streamId = 0, .taskId = 0};
if (pTask->info.fillHistory) { if (pTask->info.fillHistory) {
task.id.streamId = pTask->streamTaskId.streamId; taskId.streamId = pTask->id.streamId;
task.id.taskId = pTask->streamTaskId.taskId; taskId.taskId = pTask->id.taskId;
task.pMeta = pTask->pMeta; pTask->id.streamId = pTask->streamTaskId.streamId;
pSateTask = &task; pTask->id.taskId = pTask->streamTaskId.taskId;
} }
pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pSateTask, false, -1, -1); pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1);
if (pTask->pState == NULL) { if (pTask->pState == NULL) {
tqError("s-task:%s (vgId:%d) failed to open state for task", pTask->id.idStr, vgId); tqError("s-task:%s (vgId:%d) failed to open state for task", pTask->id.idStr, vgId);
return -1; return -1;
@ -801,6 +809,11 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t nextProcessVer) {
tqDebug("s-task:%s state:%p", pTask->id.idStr, pTask->pState); tqDebug("s-task:%s state:%p", pTask->id.idStr, pTask->pState);
} }
if (pTask->info.fillHistory) {
pTask->id.streamId = taskId.streamId;
pTask->id.taskId = taskId.taskId;
}
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList); int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList);
SReadHandle handle = { SReadHandle handle = {
.checkpointId = pTask->chkInfo.checkpointId, .checkpointId = pTask->chkInfo.checkpointId,
@ -1280,14 +1293,13 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
// downstream not ready, currently the stream tasks are not all ready. Ignore this checkpoint req.
if (pTask->status.downstreamReady != 1) { if (pTask->status.downstreamReady != 1) {
pTask->chkInfo.failedId = req.checkpointId; // record the latest failed checkpoint id pTask->chkInfo.failedId = req.checkpointId; // record the latest failed checkpoint id
pTask->checkpointingId = req.checkpointId; pTask->chkInfo.checkpointingId = req.checkpointId;
qError("s-task:%s not ready for checkpoint, since downstream not ready, ignore this checkpoint:%" PRId64 tqError("s-task:%s not ready for checkpoint, since downstream not ready, ignore this checkpoint:%" PRId64
", set it failure", ", set it failure",
pTask->id.idStr, req.checkpointId); pTask->id.idStr, req.checkpointId);
streamMetaReleaseTask(pMeta, pTask); streamMetaReleaseTask(pMeta, pTask);
SRpcMsg rsp = {0}; SRpcMsg rsp = {0};
@ -1316,10 +1328,10 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
// check if the checkpoint msg already sent or not. // check if the checkpoint msg already sent or not.
if (status == TASK_STATUS__CK) { if (status == TASK_STATUS__CK) {
ASSERT(pTask->checkpointingId == req.checkpointId); ASSERT(pTask->chkInfo.checkpointingId == req.checkpointId);
tqWarn("s-task:%s recv checkpoint-source msg again checkpointId:%" PRId64 tqWarn("s-task:%s recv checkpoint-source msg again checkpointId:%" PRId64
" already received, ignore this msg and continue process checkpoint", " already received, ignore this msg and continue process checkpoint",
pTask->id.idStr, pTask->checkpointingId); pTask->id.idStr, pTask->chkInfo.checkpointingId);
taosThreadMutexUnlock(&pTask->lock); taosThreadMutexUnlock(&pTask->lock);
streamMetaReleaseTask(pMeta, pTask); streamMetaReleaseTask(pMeta, pTask);
@ -1335,10 +1347,6 @@ int32_t tqProcessTaskCheckPointSourceReq(STQ* pTq, SRpcMsg* pMsg, SRpcMsg* pRsp)
// set the initial value for generating check point // set the initial value for generating check point
// set the mgmt epset info according to the checkout source msg from mnode, todo update mgmt epset if needed // set the mgmt epset info according to the checkout source msg from mnode, todo update mgmt epset if needed
if (pMeta->chkptNotReadyTasks == 0) {
pMeta->chkptNotReadyTasks = pMeta->numOfStreamTasks;
}
total = pMeta->numOfStreamTasks; total = pMeta->numOfStreamTasks;
streamMetaWUnLock(pMeta); streamMetaWUnLock(pMeta);
@ -1390,7 +1398,7 @@ int32_t tqProcessTaskResetReq(STQ* pTq, SRpcMsg* pMsg) {
} }
int32_t tqProcessTaskDropHTask(STQ* pTq, SRpcMsg* pMsg) { int32_t tqProcessTaskDropHTask(STQ* pTq, SRpcMsg* pMsg) {
SVDropHTaskReq* pReq = (SVDropHTaskReq*) pMsg->pCont; SVDropHTaskReq* pReq = (SVDropHTaskReq*)pMsg->pCont;
SStreamMeta* pMeta = pTq->pStreamMeta; SStreamMeta* pMeta = pTq->pStreamMeta;
SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId); SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId);
@ -1415,6 +1423,9 @@ int32_t tqProcessTaskDropHTask(STQ* pTq, SRpcMsg* pMsg) {
SStreamTaskId id = {.streamId = pTask->hTaskInfo.id.streamId, .taskId = pTask->hTaskInfo.id.taskId}; SStreamTaskId id = {.streamId = pTask->hTaskInfo.id.streamId, .taskId = pTask->hTaskInfo.id.taskId};
streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &id); streamBuildAndSendDropTaskMsg(pTask->pMsgCb, pMeta->vgId, &id);
// clear the scheduler status
streamTaskSetSchedStatusInactive(pTask);
tqDebug("s-task:%s set scheduler status:%d after drop fill-history task", pTask->id.idStr, pTask->status.schedStatus);
streamMetaReleaseTask(pMeta, pTask); streamMetaReleaseTask(pMeta, pTask);
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }

View File

@ -104,8 +104,8 @@ int32_t streamStateSnapRead(SStreamStateReader* pReader, uint8_t** ppData) {
pHdr->type = SNAP_DATA_STREAM_STATE_BACKEND; pHdr->type = SNAP_DATA_STREAM_STATE_BACKEND;
pHdr->size = len; pHdr->size = len;
memcpy(pHdr->data, rowData, len); memcpy(pHdr->data, rowData, len);
tqDebug("vgId:%d, vnode stream-state snapshot read data success", TD_VID(pReader->pTq->pVnode));
taosMemoryFree(rowData); taosMemoryFree(rowData);
tqDebug("vgId:%d, vnode stream-state snapshot read data success", TD_VID(pReader->pTq->pVnode));
return code; return code;
_err: _err:
@ -139,7 +139,7 @@ int32_t streamStateSnapWriterOpen(STQ* pTq, int64_t sver, int64_t ever, SStreamS
pWriter->sver = sver; pWriter->sver = sver;
pWriter->ever = ever; pWriter->ever = ever;
sprintf(tdir, "%s%s%s%s%s", pTq->path, TD_DIRSEP, VNODE_TQ_STREAM, TD_DIRSEP, "received"); sprintf(tdir, "%s%s%s", pTq->path, TD_DIRSEP, VNODE_TQ_STREAM);
taosMkDir(tdir); taosMkDir(tdir);
SStreamSnapWriter* pSnapWriter = NULL; SStreamSnapWriter* pSnapWriter = NULL;
@ -167,25 +167,19 @@ int32_t streamStateSnapWriterClose(SStreamStateWriter* pWriter, int8_t rollback)
return code; return code;
} }
int32_t streamStateRebuildFromSnap(SStreamStateWriter* pWriter, int64_t chkpId) {
tqDebug("vgId:%d, vnode %s start to rebuild stream-state", TD_VID(pWriter->pTq->pVnode), STREAM_STATE_TRANSFER);
streamMetaWLock(pWriter->pTq->pStreamMeta);
int32_t code = streamMetaReopen(pWriter->pTq->pStreamMeta);
if (code == 0) {
streamMetaInitBackend(pWriter->pTq->pStreamMeta);
code = streamStateLoadTasks(pWriter);
}
streamMetaWUnLock(pWriter->pTq->pStreamMeta);
tqDebug("vgId:%d, vnode %s succ to rebuild stream-state", TD_VID(pWriter->pTq->pVnode), STREAM_STATE_TRANSFER);
taosMemoryFree(pWriter);
return code;
}
int32_t streamStateLoadTasks(SStreamStateWriter* pWriter) { return streamMetaLoadAllTasks(pWriter->pTq->pStreamMeta); }
int32_t streamStateSnapWrite(SStreamStateWriter* pWriter, uint8_t* pData, uint32_t nData) { int32_t streamStateSnapWrite(SStreamStateWriter* pWriter, uint8_t* pData, uint32_t nData) {
tqDebug("vgId:%d, vnode %s snapshot write data", TD_VID(pWriter->pTq->pVnode), STREAM_STATE_TRANSFER); tqDebug("vgId:%d, vnode %s snapshot write data", TD_VID(pWriter->pTq->pVnode), STREAM_STATE_TRANSFER);
return streamSnapWrite(pWriter->pWriterImpl, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr)); return streamSnapWrite(pWriter->pWriterImpl, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr));
} }
int32_t streamStateRebuildFromSnap(SStreamStateWriter* pWriter, int64_t chkpId) {
tqDebug("vgId:%d, vnode %s start to rebuild stream-state", TD_VID(pWriter->pTq->pVnode), STREAM_STATE_TRANSFER);
int32_t code = streamStateLoadTasks(pWriter);
tqDebug("vgId:%d, vnode %s succ to rebuild stream-state", TD_VID(pWriter->pTq->pVnode), STREAM_STATE_TRANSFER);
taosMemoryFree(pWriter);
return code;
}
int32_t streamStateLoadTasks(SStreamStateWriter* pWriter) {
return streamMetaReloadAllTasks(pWriter->pTq->pStreamMeta);
}

View File

@ -238,7 +238,6 @@ int32_t streamTaskSnapWrite(SStreamTaskWriter* pWriter, uint8_t* pData, uint32_t
goto _err; goto _err;
} }
tDecoderClear(&decoder); tDecoderClear(&decoder);
// tdbTbInsert(TTB *pTb, const void *pKey, int keyLen, const void *pVal, int valLen, TXN *pTxn)
int64_t key[2] = {taskId.streamId, taskId.taskId}; int64_t key[2] = {taskId.streamId, taskId.taskId};
taosWLockLatch(&pTq->pStreamMeta->lock); taosWLockLatch(&pTq->pStreamMeta->lock);

View File

@ -1131,9 +1131,13 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
char **values_list = taosMemoryCalloc(num_keys * 2, sizeof(char *)); char **values_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
size_t *values_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t)); size_t *values_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
char **errs = taosMemoryCalloc(num_keys * 2, sizeof(char *)); char **errs = taosMemoryCalloc(num_keys * 2, sizeof(char *));
(void)tsdbCacheCommit(pTsdb);
taosThreadMutexLock(&pTsdb->lruMutex); taosThreadMutexLock(&pTsdb->lruMutex);
taosThreadMutexLock(&pTsdb->rCache.rMutex); taosThreadMutexLock(&pTsdb->rCache.rMutex);
rocksMayWrite(pTsdb, true, false, false); // rocksMayWrite(pTsdb, true, false, false);
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list, rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list,
keys_list_sizes, values_list, values_list_sizes, errs); keys_list_sizes, values_list, values_list_sizes, errs);
taosThreadMutexUnlock(&pTsdb->rCache.rMutex); taosThreadMutexUnlock(&pTsdb->rCache.rMutex);

View File

@ -20,8 +20,6 @@
#define BLOCK_COMMIT_FACTOR 3 #define BLOCK_COMMIT_FACTOR 3
extern int vnodeScheduleTask(int (*execute)(void *), void *arg);
extern int vnodeScheduleTaskEx(int tpid, int (*execute)(void *), void *arg);
extern void remove_file(const char *fname, bool last_level); extern void remove_file(const char *fname, bool last_level);
#define TSDB_FS_EDIT_MIN TSDB_FEDIT_COMMIT #define TSDB_FS_EDIT_MIN TSDB_FEDIT_COMMIT
@ -651,7 +649,6 @@ _exit:
static int32_t close_file_system(STFileSystem *fs) { static int32_t close_file_system(STFileSystem *fs) {
TARRAY2_CLEAR(fs->fSetArr, tsdbTFileSetClear); TARRAY2_CLEAR(fs->fSetArr, tsdbTFileSetClear);
TARRAY2_CLEAR(fs->fSetArrTmp, tsdbTFileSetClear); TARRAY2_CLEAR(fs->fSetArrTmp, tsdbTFileSetClear);
// TODO
return 0; return 0;
} }
@ -748,36 +745,31 @@ _exit:
  return code;
}

-static void tsdbDoWaitBgTask(STFileSystem *fs, STFSBgTask *task) {
-  task->numWait++;
-  taosThreadCondWait(task->done, &fs->tsdb->mutex);
-  task->numWait--;
-  if (task->numWait == 0) {
-    taosThreadCondDestroy(task->done);
-    if (task->destroy) {
-      task->destroy(task->arg);
-    }
-    taosMemoryFree(task);
-  }
-}
-
-static void tsdbDoDoneBgTask(STFileSystem *fs, STFSBgTask *task) {
-  if (task->numWait > 0) {
-    taosThreadCondBroadcast(task->done);
-  } else {
-    taosThreadCondDestroy(task->done);
-    if (task->destroy) {
-      task->destroy(task->arg);
-    }
-    taosMemoryFree(task);
-  }
-}
+int32_t tsdbFSCancelAllBgTask(STFileSystem *fs) {
+  TARRAY2(int64_t) channelArr = {0};
+
+  // collect all open channels
+  taosThreadMutexLock(&fs->tsdb->mutex);
+  STFileSet *fset;
+  TARRAY2_FOREACH(fs->fSetArr, fset) {
+    if (VNODE_ASYNC_VALID_CHANNEL_ID(fset->bgTaskChannel)) {
+      TARRAY2_APPEND(&channelArr, fset->bgTaskChannel);
+      fset->bgTaskChannel = 0;
+    }
+  }
+  taosThreadMutexUnlock(&fs->tsdb->mutex);
+
+  // destroy all channels
+  int64_t channel;
+  TARRAY2_FOREACH(&channelArr, channel) { vnodeAChannelDestroy(vnodeAsyncHandle[1], channel, true); }
+  TARRAY2_DESTROY(&channelArr, NULL);
+  return 0;
+}

int32_t tsdbCloseFS(STFileSystem **fs) {
  if (fs[0] == NULL) return 0;
-  tsdbFSDisableBgTask(fs[0]);
+  tsdbFSCancelAllBgTask(*fs);
  close_file_system(fs[0]);
  destroy_fs(fs);
  return 0;
@ -910,7 +902,20 @@ int32_t tsdbFSEditCommit(STFileSystem *fs) {
} }
    if (!skipMerge) {
-     code = tsdbSchedMerge(fs->tsdb, fset->fid);
+     code = tsdbTFileSetOpenChannel(fset);
+     TSDB_CHECK_CODE(code, lino, _exit);
+
+     SMergeArg *arg = taosMemoryMalloc(sizeof(*arg));
+     if (arg == NULL) {
+       code = TSDB_CODE_OUT_OF_MEMORY;
+       TSDB_CHECK_CODE(code, lino, _exit);
+     }
+
+     arg->tsdb = fs->tsdb;
+     arg->fid = fset->fid;
+
+     code = vnodeAsyncC(vnodeAsyncHandle[1], fset->bgTaskChannel, EVA_PRIORITY_HIGH, tsdbMerge, taosMemoryFree,
+                        arg, NULL);
      TSDB_CHECK_CODE(code, lino, _exit);
    }
  }
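The hunk above drops the old tsdbSchedMerge() indirection: the committer now opens the fileset's background channel and submits tsdbMerge directly. A minimal hedged sketch of that submit pattern follows; vnodeAsyncHandle[1] is the background-task handle used elsewhere in this diff, the channel is assumed to be open already, and SExampleArg/exampleRun/exampleSchedule are illustrative names rather than code from this commit.

/* Hedged sketch of the new scheduling pattern (illustrative names). */
typedef struct {
  STsdb  *tsdb;
  int32_t fid;
} SExampleArg;

static int32_t exampleRun(void *arg) {
  SExampleArg *p = arg;
  (void)p;  // background work for p->fid goes here
  return 0;
}

static int32_t exampleSchedule(STsdb *tsdb, int64_t channelId, int32_t fid, bool wait) {
  SExampleArg *arg = taosMemoryMalloc(sizeof(*arg));
  if (arg == NULL) return TSDB_CODE_OUT_OF_MEMORY;
  arg->tsdb = tsdb;
  arg->fid = fid;

  int64_t taskId = 0;
  // taosMemoryFree is registered as the completion callback, so arg is released
  // once the task has run or been cancelled together with its channel.
  int32_t code = vnodeAsyncC(vnodeAsyncHandle[1], channelId, EVA_PRIORITY_HIGH,
                             exampleRun, taosMemoryFree, arg, &taskId);
  if (code) {
    taosMemoryFree(arg);  // submission failed, the completion callback will not fire
    return code;
  }
  return wait ? vnodeAWait(vnodeAsyncHandle[1], taskId) : 0;
}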
@ -939,7 +944,11 @@ int32_t tsdbFSEditCommit(STFileSystem *fs) {
} }
} }
-   if (tsdbTFileSetIsEmpty(fset) && fset->bgTaskRunning == NULL) {
+   if (tsdbTFileSetIsEmpty(fset)) {
+     if (VNODE_ASYNC_VALID_CHANNEL_ID(fset->bgTaskChannel)) {
+       vnodeAChannelDestroy(vnodeAsyncHandle[1], fset->bgTaskChannel, false);
+       fset->bgTaskChannel = 0;
+     }
      TARRAY2_REMOVE(fs->fSetArr, i, tsdbTFileSetClear);
} else { } else {
i++; i++;
@ -1179,136 +1188,4 @@ _out:
pHash = NULL; pHash = NULL;
} }
return code; return code;
} }
const char *gFSBgTaskName[] = {NULL, "MERGE", "RETENTION", "COMPACT"};
static int32_t tsdbFSRunBgTask(void *arg) {
STFSBgTask *task = (STFSBgTask *)arg;
STFileSystem *fs = task->fs;
task->launchTime = taosGetTimestampMs();
task->run(task->arg);
task->finishTime = taosGetTimestampMs();
tsdbDebug("vgId:%d bg task:%s task id:%" PRId64 " finished, schedule time:%" PRId64 " launch time:%" PRId64
" finish time:%" PRId64,
TD_VID(fs->tsdb->pVnode), gFSBgTaskName[task->type], task->taskid, task->scheduleTime, task->launchTime,
task->finishTime);
taosThreadMutexLock(&fs->tsdb->mutex);
STFileSet *fset = NULL;
tsdbFSGetFSet(fs, task->fid, &fset);
ASSERT(fset != NULL && fset->bgTaskRunning == task);
// free last
tsdbDoDoneBgTask(fs, task);
fset->bgTaskRunning = NULL;
// schedule next
if (fset->bgTaskNum > 0) {
if (fs->stop) {
while (fset->bgTaskNum > 0) {
STFSBgTask *nextTask = fset->bgTaskQueue->next;
nextTask->prev->next = nextTask->next;
nextTask->next->prev = nextTask->prev;
fset->bgTaskNum--;
tsdbDoDoneBgTask(fs, nextTask);
}
} else {
// pop task from head
fset->bgTaskRunning = fset->bgTaskQueue->next;
fset->bgTaskRunning->prev->next = fset->bgTaskRunning->next;
fset->bgTaskRunning->next->prev = fset->bgTaskRunning->prev;
fset->bgTaskNum--;
vnodeScheduleTaskEx(1, tsdbFSRunBgTask, fset->bgTaskRunning);
}
}
taosThreadMutexUnlock(&fs->tsdb->mutex);
return 0;
}
// IMPORTANT: the caller must hold the fs->tsdb->mutex
int32_t tsdbFSScheduleBgTask(STFileSystem *fs, int32_t fid, EFSBgTaskT type, int32_t (*run)(void *),
void (*destroy)(void *), void *arg, int64_t *taskid) {
if (fs->stop) {
if (destroy) {
destroy(arg);
}
return 0;
}
STFileSet *fset;
tsdbFSGetFSet(fs, fid, &fset);
ASSERT(fset != NULL);
for (STFSBgTask *task = fset->bgTaskQueue->next; task != fset->bgTaskQueue; task = task->next) {
if (task->type == type) {
if (destroy) {
destroy(arg);
}
return 0;
}
}
// do schedule task
STFSBgTask *task = taosMemoryCalloc(1, sizeof(STFSBgTask));
if (task == NULL) return TSDB_CODE_OUT_OF_MEMORY;
taosThreadCondInit(task->done, NULL);
task->fs = fs;
task->fid = fid;
task->type = type;
task->run = run;
task->destroy = destroy;
task->arg = arg;
task->scheduleTime = taosGetTimestampMs();
task->taskid = ++fs->taskid;
if (fset->bgTaskRunning == NULL && fset->bgTaskNum == 0) {
// launch task directly
fset->bgTaskRunning = task;
vnodeScheduleTaskEx(1, tsdbFSRunBgTask, task);
} else {
// add to the queue tail
fset->bgTaskNum++;
task->next = fset->bgTaskQueue;
task->prev = fset->bgTaskQueue->prev;
task->prev->next = task;
task->next->prev = task;
}
if (taskid) *taskid = task->taskid;
return 0;
}
int32_t tsdbFSDisableBgTask(STFileSystem *fs) {
taosThreadMutexLock(&fs->tsdb->mutex);
for (;;) {
fs->stop = true;
bool done = true;
STFileSet *fset;
TARRAY2_FOREACH(fs->fSetArr, fset) {
if (fset->bgTaskRunning) {
tsdbDoWaitBgTask(fs, fset->bgTaskRunning);
done = false;
break;
}
}
if (done) break;
}
taosThreadMutexUnlock(&fs->tsdb->mutex);
return 0;
}
int32_t tsdbFSEnableBgTask(STFileSystem *fs) {
taosThreadMutexLock(&fs->tsdb->mutex);
fs->stop = false;
taosThreadMutexUnlock(&fs->tsdb->mutex);
return 0;
}

View File

@ -55,11 +55,6 @@ int64_t tsdbFSAllocEid(STFileSystem *fs);
int32_t tsdbFSEditBegin(STFileSystem *fs, const TFileOpArray *opArray, EFEditT etype); int32_t tsdbFSEditBegin(STFileSystem *fs, const TFileOpArray *opArray, EFEditT etype);
int32_t tsdbFSEditCommit(STFileSystem *fs); int32_t tsdbFSEditCommit(STFileSystem *fs);
int32_t tsdbFSEditAbort(STFileSystem *fs); int32_t tsdbFSEditAbort(STFileSystem *fs);
// background task
int32_t tsdbFSScheduleBgTask(STFileSystem *fs, int32_t fid, EFSBgTaskT type, int32_t (*run)(void *),
void (*destroy)(void *), void *arg, int64_t *taskid);
int32_t tsdbFSDisableBgTask(STFileSystem *fs);
int32_t tsdbFSEnableBgTask(STFileSystem *fs);
// other // other
int32_t tsdbFSGetFSet(STFileSystem *fs, int32_t fid, STFileSet **fset); int32_t tsdbFSGetFSet(STFileSystem *fs, int32_t fid, STFileSet **fset);
int32_t tsdbFSCheckCommit(STsdb *tsdb, int32_t fid); int32_t tsdbFSCheckCommit(STsdb *tsdb, int32_t fid);

View File

@ -14,6 +14,7 @@
*/ */
#include "tsdbFSet2.h" #include "tsdbFSet2.h"
#include "vnd.h"
int32_t tsdbSttLvlInit(int32_t level, SSttLvl **lvl) { int32_t tsdbSttLvlInit(int32_t level, SSttLvl **lvl) {
if (!(lvl[0] = taosMemoryMalloc(sizeof(SSttLvl)))) return TSDB_CODE_OUT_OF_MEMORY; if (!(lvl[0] = taosMemoryMalloc(sizeof(SSttLvl)))) return TSDB_CODE_OUT_OF_MEMORY;
@ -451,10 +452,7 @@ int32_t tsdbTFileSetInit(int32_t fid, STFileSet **fset) {
TARRAY2_INIT(fset[0]->lvlArr); TARRAY2_INIT(fset[0]->lvlArr);
// background task queue // background task queue
fset[0]->bgTaskNum = 0; fset[0]->bgTaskChannel = 0;
fset[0]->bgTaskQueue->next = fset[0]->bgTaskQueue;
fset[0]->bgTaskQueue->prev = fset[0]->bgTaskQueue;
fset[0]->bgTaskRunning = NULL;
// block commit variables // block commit variables
taosThreadCondInit(&fset[0]->canCommit, NULL); taosThreadCondInit(&fset[0]->canCommit, NULL);
@ -650,3 +648,8 @@ bool tsdbTFileSetIsEmpty(const STFileSet *fset) {
} }
return TARRAY2_SIZE(fset->lvlArr) == 0; return TARRAY2_SIZE(fset->lvlArr) == 0;
} }
int32_t tsdbTFileSetOpenChannel(STFileSet *fset) {
if (VNODE_ASYNC_VALID_CHANNEL_ID(fset->bgTaskChannel)) return 0;
return vnodeAChannelInit(vnodeAsyncHandle[1], &fset->bgTaskChannel);
}
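tsdbTFileSetOpenChannel() is the lazy-init entry point: bgTaskChannel presumably stays 0 until the first background job touches the fileset, and VNODE_ASYNC_VALID_CHANNEL_ID short-circuits later calls. A small hedged sketch of the intended call pattern, with an illustrative caller name:

// Illustrative caller: lazily ensure the fileset has a channel, then queue a job on it.
static int32_t exampleSubmitOnFileSet(STFileSet *fset, int32_t (*job)(void *), void *arg) {
  int32_t code = tsdbTFileSetOpenChannel(fset);  // no-op if the channel is already open
  if (code) return code;
  return vnodeAsyncC(vnodeAsyncHandle[1], fset->bgTaskChannel, EVA_PRIORITY_LOW, job,
                     taosMemoryFree, arg, NULL);
}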

View File

@ -22,14 +22,12 @@
extern "C" { extern "C" {
#endif #endif
typedef struct STFileSet STFileSet; typedef struct STFileOp STFileOp;
typedef struct STFileOp STFileOp; typedef struct SSttLvl SSttLvl;
typedef struct SSttLvl SSttLvl;
typedef TARRAY2(STFileObj *) TFileObjArray; typedef TARRAY2(STFileObj *) TFileObjArray;
typedef TARRAY2(SSttLvl *) TSttLvlArray; typedef TARRAY2(SSttLvl *) TSttLvlArray;
typedef TARRAY2(STFileOp) TFileOpArray; typedef TARRAY2(STFileOp) TFileOpArray;
typedef struct STFileSystem STFileSystem; typedef struct STFileSystem STFileSystem;
typedef struct STFSBgTask STFSBgTask;
typedef enum { typedef enum {
TSDB_FOP_NONE = 0, TSDB_FOP_NONE = 0,
@ -72,33 +70,8 @@ bool tsdbTFileSetIsEmpty(const STFileSet *fset);
// stt // stt
int32_t tsdbSttLvlInit(int32_t level, SSttLvl **lvl); int32_t tsdbSttLvlInit(int32_t level, SSttLvl **lvl);
int32_t tsdbSttLvlClear(SSttLvl **lvl); int32_t tsdbSttLvlClear(SSttLvl **lvl);
// open channel
typedef enum { int32_t tsdbTFileSetOpenChannel(STFileSet *fset);
TSDB_BG_TASK_MERGER = 1,
TSDB_BG_TASK_RETENTION,
TSDB_BG_TASK_COMPACT,
} EFSBgTaskT;
struct STFSBgTask {
STFileSystem *fs;
int32_t fid;
EFSBgTaskT type;
int32_t (*run)(void *arg);
void (*destroy)(void *arg);
void *arg;
TdThreadCond done[1];
int32_t numWait;
int64_t taskid;
int64_t scheduleTime;
int64_t launchTime;
int64_t finishTime;
struct STFSBgTask *prev;
struct STFSBgTask *next;
};
struct STFileOp { struct STFileOp {
tsdb_fop_t optype; tsdb_fop_t optype;
@ -118,10 +91,8 @@ struct STFileSet {
STFileObj *farr[TSDB_FTYPE_MAX]; // file array STFileObj *farr[TSDB_FTYPE_MAX]; // file array
TSttLvlArray lvlArr[1]; // level array TSttLvlArray lvlArr[1]; // level array
// background task queue // background task channel
int32_t bgTaskNum; int64_t bgTaskChannel;
STFSBgTask bgTaskQueue[1];
STFSBgTask *bgTaskRunning;
// block commit variables // block commit variables
TdThreadCond canCommit; TdThreadCond canCommit;

View File

@ -17,11 +17,6 @@
#define TSDB_MAX_LEVEL 2 // means max level is 3 #define TSDB_MAX_LEVEL 2 // means max level is 3
typedef struct {
STsdb *tsdb;
int32_t fid;
} SMergeArg;
typedef struct { typedef struct {
STsdb *tsdb; STsdb *tsdb;
int32_t fid; int32_t fid;
@ -528,7 +523,7 @@ static int32_t tsdbMergeGetFSet(SMerger *merger) {
return 0; return 0;
} }
static int32_t tsdbMerge(void *arg) { int32_t tsdbMerge(void *arg) {
int32_t code = 0; int32_t code = 0;
int32_t lino = 0; int32_t lino = 0;
SMergeArg *mergeArg = (SMergeArg *)arg; SMergeArg *mergeArg = (SMergeArg *)arg;
@ -597,18 +592,3 @@ _exit:
tsdbTFileSetClear(&merger->fset); tsdbTFileSetClear(&merger->fset);
return code; return code;
} }
int32_t tsdbSchedMerge(STsdb *tsdb, int32_t fid) {
SMergeArg *arg = taosMemoryMalloc(sizeof(*arg));
if (arg == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
arg->tsdb = tsdb;
arg->fid = fid;
int32_t code = tsdbFSScheduleBgTask(tsdb->pFS, fid, TSDB_BG_TASK_MERGER, tsdbMerge, taosMemoryFree, arg, NULL);
if (code) taosMemoryFree(arg);
return code;
}

View File

@ -48,7 +48,7 @@ static int32_t doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScan
static int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key, static int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key,
STsdbReader* pReader); STsdbReader* pReader);
static int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, int32_t order, SCostSummary* pCost); static int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, int32_t order, SReadCostSummary* pCost);
static STsdb* getTsdbByRetentions(SVnode* pVnode, SQueryTableDataCond* pCond, SRetention* retentions, const char* idstr, static STsdb* getTsdbByRetentions(SVnode* pVnode, SQueryTableDataCond* pCond, SRetention* retentions, const char* idstr,
int8_t* pLevel); int8_t* pLevel);
static SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_t level); static SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_t level);
@ -58,6 +58,7 @@ static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbRea
static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo); static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo);
static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter); static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter);
static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order); static int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order);
static void updateComposedBlockInfo(STsdbReader* pReader, double el, STableBlockScanInfo* pBlockScanInfo);
static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); } static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }
@ -168,7 +169,7 @@ static int32_t filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader, bo
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
SCostSummary* pCost = &pReader->cost; SReadCostSummary* pCost = &pReader->cost;
pIter->pLastBlockReader->uid = 0; pIter->pLastBlockReader->uid = 0;
tMergeTreeClose(&pIter->pLastBlockReader->mergeTree); tMergeTreeClose(&pIter->pLastBlockReader->mergeTree);
@ -291,11 +292,7 @@ static SSDataBlock* createResBlock(SQueryTableDataCond* pCond, int32_t capacity)
} }
static int32_t tsdbInitReaderLock(STsdbReader* pReader) { static int32_t tsdbInitReaderLock(STsdbReader* pReader) {
int32_t code = -1; int32_t code = taosThreadMutexInit(&pReader->readerMutex, NULL);
qTrace("tsdb/read: %p, pre-init read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);
code = taosThreadMutexInit(&pReader->readerMutex, NULL);
qTrace("tsdb/read: %p, post-init read mutex: %p, code: %d", pReader, &pReader->readerMutex, code); qTrace("tsdb/read: %p, post-init read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);
return code; return code;
@ -324,22 +321,14 @@ static int32_t tsdbAcquireReader(STsdbReader* pReader) {
} }
static int32_t tsdbTryAcquireReader(STsdbReader* pReader) { static int32_t tsdbTryAcquireReader(STsdbReader* pReader) {
int32_t code = -1; int32_t code = taosThreadMutexTryLock(&pReader->readerMutex);
qTrace("tsdb/read: %p, pre-trytake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);
code = taosThreadMutexTryLock(&pReader->readerMutex);
qTrace("tsdb/read: %p, post-trytake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code); qTrace("tsdb/read: %p, post-trytake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);
return code; return code;
} }
static int32_t tsdbReleaseReader(STsdbReader* pReader) { static int32_t tsdbReleaseReader(STsdbReader* pReader) {
int32_t code = -1; int32_t code = taosThreadMutexUnlock(&pReader->readerMutex);
qTrace("tsdb/read: %p, pre-untake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);
code = taosThreadMutexUnlock(&pReader->readerMutex);
qTrace("tsdb/read: %p, post-untake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code); qTrace("tsdb/read: %p, post-untake read mutex: %p, code: %d", pReader, &pReader->readerMutex, code);
return code; return code;
@ -432,6 +421,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, void
} }
tsdbInitReaderLock(pReader); tsdbInitReaderLock(pReader);
tsem_init(&pReader->resumeAfterSuspend, 0, 0);
*ppReader = pReader; *ppReader = pReader;
return code; return code;
@ -1015,8 +1005,8 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader) {
// check if current block are all handled // check if current block are all handled
if (pDumpInfo->rowIndex >= 0 && pDumpInfo->rowIndex < pRecord->numRow) { if (pDumpInfo->rowIndex >= 0 && pDumpInfo->rowIndex < pRecord->numRow) {
int64_t ts = pBlockData->aTSKEY[pDumpInfo->rowIndex]; int64_t ts = pBlockData->aTSKEY[pDumpInfo->rowIndex];
if (outOfTimeWindow(ts, if (outOfTimeWindow(ts, &pReader->info.window)) {
&pReader->info.window)) { // the remain data has out of query time window, ignore current block // the remain data has out of query time window, ignore current block
setBlockAllDumped(pDumpInfo, ts, pReader->info.order); setBlockAllDumped(pDumpInfo, ts, pReader->info.order);
} }
} else { } else {
@ -1123,16 +1113,12 @@ static bool getNeighborBlockOfSameTable(SDataBlockIter* pBlockIter, SFileDataBlo
} }
int32_t step = asc ? 1 : -1; int32_t step = asc ? 1 : -1;
// *nextIndex = pBlockInfo->tbBlockIdx + step;
// *pBlockIndex = *(SBlockIndex*)taosArrayGet(pTableBlockScanInfo->pBlockList, *nextIndex);
STableDataBlockIdx* pTableDataBlockIdx = STableDataBlockIdx* pTableDataBlockIdx =
taosArrayGet(pTableBlockScanInfo->pBlockIdxList, pBlockInfo->tbBlockIdx + step); taosArrayGet(pTableBlockScanInfo->pBlockIdxList, pBlockInfo->tbBlockIdx + step);
SFileDataBlockInfo* p = taosArrayGet(pBlockIter->blockList, pTableDataBlockIdx->globalIndex); SFileDataBlockInfo* p = taosArrayGet(pBlockIter->blockList, pTableDataBlockIdx->globalIndex);
memcpy(pRecord, &p->record, sizeof(SBrinRecord)); memcpy(pRecord, &p->record, sizeof(SBrinRecord));
*nextIndex = pBlockInfo->tbBlockIdx + step; *nextIndex = pBlockInfo->tbBlockIdx + step;
// tMapDataGetItemByIdx(&pTableBlockScanInfo->mapData, pIndex->ordinalIndex, pBlock, tGetDataBlk);
return true; return true;
} }
@ -1376,23 +1362,19 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo*
return TSDB_CODE_SUCCESS; return TSDB_CODE_SUCCESS;
} }
SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock;
int64_t st = taosGetTimestampUs(); int64_t st = taosGetTimestampUs();
SSDataBlock* pBlock = pReader->resBlockInfo.pResBlock;
int32_t code = buildDataBlockFromBufImpl(pBlockScanInfo, endKey, pReader->resBlockInfo.capacity, pReader); int32_t code = buildDataBlockFromBufImpl(pBlockScanInfo, endKey, pReader->resBlockInfo.capacity, pReader);
blockDataUpdateTsWindow(pBlock, pReader->suppInfo.slotId[0]); double el = (taosGetTimestampUs() - st) / 1000.0;
pBlock->info.id.uid = pBlockScanInfo->uid; updateComposedBlockInfo(pReader, el, pBlockScanInfo);
setComposedBlockFlag(pReader, true);
double elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
tsdbDebug("%p build data block from cache completed, elapsed time:%.2f ms, numOfRows:%" PRId64 ", brange:%" PRId64 tsdbDebug("%p build data block from cache completed, elapsed time:%.2f ms, numOfRows:%" PRId64 ", brange:%" PRId64
" - %" PRId64 ", uid:%" PRIu64 ", %s", " - %" PRId64 ", uid:%" PRIu64 ", %s",
pReader, elapsedTime, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey, pReader, el, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey,
pBlockScanInfo->uid, pReader->idStr); pBlockScanInfo->uid, pReader->idStr);
pReader->cost.buildmemBlock += elapsedTime; pReader->cost.buildmemBlock += el;
return code; return code;
} }
@ -2293,13 +2275,12 @@ static int32_t loadNeighborIfOverlap(SFileDataBlockInfo* pBlockInfo, STableBlock
return code; return code;
} }
static void updateComposedBlockInfo(STsdbReader* pReader, double el, STableBlockScanInfo* pBlockScanInfo) { void updateComposedBlockInfo(STsdbReader* pReader, double el, STableBlockScanInfo* pBlockScanInfo) {
SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock; SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
pResBlock->info.id.uid = (pBlockScanInfo != NULL) ? pBlockScanInfo->uid : 0; pResBlock->info.id.uid = (pBlockScanInfo != NULL) ? pBlockScanInfo->uid : 0;
pResBlock->info.dataLoad = 1; pResBlock->info.dataLoad = 1;
blockDataUpdateTsWindow(pResBlock, pReader->suppInfo.slotId[0]); blockDataUpdateTsWindow(pResBlock, pReader->suppInfo.slotId[0]);
setComposedBlockFlag(pReader, true); setComposedBlockFlag(pReader, true);
pReader->cost.composedBlocks += 1; pReader->cost.composedBlocks += 1;
@ -2356,7 +2337,6 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
pBlockScanInfo = *pReader->status.pTableIter; pBlockScanInfo = *pReader->status.pTableIter;
if (pReader->pIgnoreTables && if (pReader->pIgnoreTables &&
taosHashGet(*pReader->pIgnoreTables, &pBlockScanInfo->uid, sizeof(pBlockScanInfo->uid))) { taosHashGet(*pReader->pIgnoreTables, &pBlockScanInfo->uid, sizeof(pBlockScanInfo->uid))) {
// setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->info.order);
return code; return code;
} }
} }
@ -2436,7 +2416,7 @@ int32_t getInitialDelIndex(const SArray* pDelSkyline, int32_t order) {
return ASCENDING_TRAVERSE(order) ? 0 : taosArrayGetSize(pDelSkyline) - 1; return ASCENDING_TRAVERSE(order) ? 0 : taosArrayGetSize(pDelSkyline) - 1;
} }
int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, int32_t order, SCostSummary* pCost) { int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, int32_t order, SReadCostSummary* pCost) {
int32_t code = 0; int32_t code = 0;
int32_t newDelDataInFile = taosArrayGetSize(pBlockScanInfo->pFileDelData); int32_t newDelDataInFile = taosArrayGetSize(pBlockScanInfo->pFileDelData);
if (newDelDataInFile == 0 && if (newDelDataInFile == 0 &&
@ -2935,6 +2915,8 @@ static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) {
SReaderStatus* pStatus = &pReader->status; SReaderStatus* pStatus = &pReader->status;
STableUidList* pUidList = &pStatus->uidList; STableUidList* pUidList = &pStatus->uidList;
tsdbDebug("seq load data blocks from cache, %s", pReader->idStr);
while (1) { while (1) {
if (pReader->code != TSDB_CODE_SUCCESS) { if (pReader->code != TSDB_CODE_SUCCESS) {
tsdbWarn("tsdb reader is stopped ASAP, code:%s, %s", strerror(pReader->code), pReader->idStr); tsdbWarn("tsdb reader is stopped ASAP, code:%s, %s", strerror(pReader->code), pReader->idStr);
@ -3043,6 +3025,8 @@ static ERetrieveType doReadDataFromLastFiles(STsdbReader* pReader) {
SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock; SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock;
SDataBlockIter* pBlockIter = &pReader->status.blockIter; SDataBlockIter* pBlockIter = &pReader->status.blockIter;
tsdbDebug("seq load data blocks from stt files %s", pReader->idStr);
while (1) { while (1) {
terrno = 0; terrno = 0;
@ -3774,7 +3758,6 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
int32_t code = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS;
do { do {
// SRow* pTSRow = NULL;
TSDBROW row = {.type = -1}; TSDBROW row = {.type = -1};
bool freeTSRow = false; bool freeTSRow = false;
tsdbGetNextRowInMem(pBlockScanInfo, pReader, &row, endKey, &freeTSRow); tsdbGetNextRowInMem(pBlockScanInfo, pReader, &row, endKey, &freeTSRow);
@ -3783,6 +3766,7 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
} }
if (row.type == TSDBROW_ROW_FMT) { if (row.type == TSDBROW_ROW_FMT) {
      int64_t ts = row.pTSRow->ts;
code = doAppendRowFromTSRow(pBlock, pReader, row.pTSRow, pBlockScanInfo); code = doAppendRowFromTSRow(pBlock, pReader, row.pTSRow, pBlockScanInfo);
if (freeTSRow) { if (freeTSRow) {
@ -3792,13 +3776,17 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
if (code) { if (code) {
return code; return code;
} }
pBlockScanInfo->lastProcKey = ts;
} else { } else {
code = doAppendRowFromFileBlock(pBlock, pReader, row.pBlockData, row.iRow); code = doAppendRowFromFileBlock(pBlock, pReader, row.pBlockData, row.iRow);
if (code) { if (code) {
break; break;
} }
pBlockScanInfo->lastProcKey = row.pBlockData->aTSKEY[row.iRow];
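      // note: lastProcKey tracks the timestamp of the last row copied into the result block;
      // tsdbReaderSuspend2 later derives sttKeyInfo.nextProcKey from it (lastProcKey +/- 1,
      // depending on traversal order), so a resumed reader continues after the rows already returned.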
} }
// no data in buffer, return immediately // no data in buffer, return immediately
if (!(pBlockScanInfo->iter.hasVal || pBlockScanInfo->iiter.hasVal)) { if (!(pBlockScanInfo->iter.hasVal || pBlockScanInfo->iiter.hasVal)) {
break; break;
@ -4107,7 +4095,7 @@ void tsdbReaderClose2(STsdbReader* pReader) {
tsdbDataFileReaderClose(&pReader->pFileReader); tsdbDataFileReaderClose(&pReader->pFileReader);
} }
SCostSummary* pCost = &pReader->cost; SReadCostSummary* pCost = &pReader->cost;
SFilesetIter* pFilesetIter = &pReader->status.fileIter; SFilesetIter* pFilesetIter = &pReader->status.fileIter;
if (pFilesetIter->pLastBlockReader != NULL) { if (pFilesetIter->pLastBlockReader != NULL) {
SLastBlockReader* pLReader = pFilesetIter->pLastBlockReader; SLastBlockReader* pLReader = pFilesetIter->pLastBlockReader;
@ -4122,6 +4110,7 @@ void tsdbReaderClose2(STsdbReader* pReader) {
tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, true); tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, true);
pReader->pReadSnap = NULL; pReader->pReadSnap = NULL;
tsem_destroy(&pReader->resumeAfterSuspend);
tsdbReleaseReader(pReader); tsdbReleaseReader(pReader);
tsdbUninitReaderLock(pReader); tsdbUninitReaderLock(pReader);
@ -4154,6 +4143,8 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) {
SReaderStatus* pStatus = &pReader->status; SReaderStatus* pStatus = &pReader->status;
STableBlockScanInfo* pBlockScanInfo = NULL; STableBlockScanInfo* pBlockScanInfo = NULL;
pReader->status.suspendInvoked = true; // record the suspend status
if (pStatus->loadFromFile) { if (pStatus->loadFromFile) {
SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter); SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter);
if (pBlockInfo != NULL) { if (pBlockInfo != NULL) {
@ -4167,84 +4158,34 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) {
tsdbDataFileReaderClose(&pReader->pFileReader); tsdbDataFileReaderClose(&pReader->pFileReader);
SCostSummary* pCost = &pReader->cost; SReadCostSummary* pCost = &pReader->cost;
pReader->status.pLDataIterArray = destroySttBlockReader(pReader->status.pLDataIterArray, &pCost->sttCost); pReader->status.pLDataIterArray = destroySttBlockReader(pReader->status.pLDataIterArray, &pCost->sttCost);
pReader->status.pLDataIterArray = taosArrayInit(4, POINTER_BYTES); pReader->status.pLDataIterArray = taosArrayInit(4, POINTER_BYTES);
// resetDataBlockScanInfo excluding lastKey
STableBlockScanInfo** p = NULL;
int32_t iter = 0;
while ((p = tSimpleHashIterate(pStatus->pTableMap, p, &iter)) != NULL) {
STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p;
pInfo->iterInit = false;
pInfo->iter.hasVal = false;
pInfo->iiter.hasVal = false;
if (pInfo->iter.iter != NULL) {
pInfo->iter.iter = tsdbTbDataIterDestroy(pInfo->iter.iter);
}
if (pInfo->iiter.iter != NULL) {
pInfo->iiter.iter = tsdbTbDataIterDestroy(pInfo->iiter.iter);
}
pInfo->delSkyline = taosArrayDestroy(pInfo->delSkyline);
pInfo->pFileDelData = taosArrayDestroy(pInfo->pFileDelData);
}
} else {
// resetDataBlockScanInfo excluding lastKey
STableBlockScanInfo** p = NULL;
int32_t iter = 0;
while ((p = tSimpleHashIterate(pStatus->pTableMap, p, &iter)) != NULL) {
STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p;
pInfo->iterInit = false;
pInfo->iter.hasVal = false;
pInfo->iiter.hasVal = false;
if (pInfo->iter.iter != NULL) {
pInfo->iter.iter = tsdbTbDataIterDestroy(pInfo->iter.iter);
}
if (pInfo->iiter.iter != NULL) {
pInfo->iiter.iter = tsdbTbDataIterDestroy(pInfo->iiter.iter);
}
pInfo->delSkyline = taosArrayDestroy(pInfo->delSkyline);
}
pBlockScanInfo = pStatus->pTableIter == NULL ? NULL : *pStatus->pTableIter;
if (pBlockScanInfo) {
// save lastKey to restore memory iterator
STimeWindow w = pReader->resBlockInfo.pResBlock->info.window;
pBlockScanInfo->lastProcKey = ASCENDING_TRAVERSE(pReader->info.order) ? w.ekey : w.skey;
// reset current current table's data block scan info,
pBlockScanInfo->iterInit = false;
pBlockScanInfo->iter.hasVal = false;
pBlockScanInfo->iiter.hasVal = false;
if (pBlockScanInfo->iter.iter != NULL) {
pBlockScanInfo->iter.iter = tsdbTbDataIterDestroy(pBlockScanInfo->iter.iter);
}
if (pBlockScanInfo->iiter.iter != NULL) {
pBlockScanInfo->iiter.iter = tsdbTbDataIterDestroy(pBlockScanInfo->iiter.iter);
}
pBlockScanInfo->pBlockList = taosArrayDestroy(pBlockScanInfo->pBlockList);
pBlockScanInfo->pBlockIdxList = taosArrayDestroy(pBlockScanInfo->pBlockIdxList);
// TODO: keep skyline for reuse
pBlockScanInfo->delSkyline = taosArrayDestroy(pBlockScanInfo->delSkyline);
}
} }
// resetDataBlockScanInfo excluding lastKey
STableBlockScanInfo** p = NULL;
int32_t step = ASCENDING_TRAVERSE(pReader->info.order)? 1:-1;
int32_t iter = 0;
while ((p = tSimpleHashIterate(pStatus->pTableMap, p, &iter)) != NULL) {
STableBlockScanInfo* pInfo = *(STableBlockScanInfo**)p;
clearBlockScanInfo(pInfo);
pInfo->sttKeyInfo.nextProcKey = pInfo->lastProcKey + step;
}
pStatus->uidList.currentIndex = 0;
initReaderStatus(pStatus);
tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, false); tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, false);
pReader->pReadSnap = NULL; pReader->pReadSnap = NULL;
pReader->flag = READER_STATUS_SUSPEND; pReader->flag = READER_STATUS_SUSPEND;
#if SUSPEND_RESUME_TEST
tsem_post(&pReader->resumeAfterSuspend);
#endif
tsdbDebug("reader: %p suspended uid %" PRIu64 " in this query %s", pReader, pBlockScanInfo ? pBlockScanInfo->uid : 0, tsdbDebug("reader: %p suspended uid %" PRIu64 " in this query %s", pReader, pBlockScanInfo ? pBlockScanInfo->uid : 0,
pReader->idStr); pReader->idStr);
return code; return code;
@ -4399,6 +4340,16 @@ int32_t tsdbNextDataBlock2(STsdbReader* pReader, bool* hasNext) {
SReaderStatus* pStatus = &pReader->status; SReaderStatus* pStatus = &pReader->status;
  // NOTE: the following code is used to test suspend/resume of the tsdbReader when it blocks the commit.
  // The data should be ingested in round-robin fashion and all child tables should be created before ingesting data;
  // the version range of the query is then used to verify the correctness of the suspend/resume functions.
  // This function will block before loading the SECOND block from the vnode buffer, then restart itself from the stt files.
#if SUSPEND_RESUME_TEST
if (!pReader->status.suspendInvoked && !pReader->status.loadFromFile) {
tsem_wait(&pReader->resumeAfterSuspend);
}
#endif
code = tsdbAcquireReader(pReader); code = tsdbAcquireReader(pReader);
qTrace("tsdb/read: %p, take read mutex, code: %d", pReader, code); qTrace("tsdb/read: %p, take read mutex, code: %d", pReader, code);

View File

@ -210,6 +210,7 @@ void clearBlockScanInfo(STableBlockScanInfo* p) {
p->iterInit = false; p->iterInit = false;
p->iter.hasVal = false; p->iter.hasVal = false;
p->iiter.hasVal = false; p->iiter.hasVal = false;
p->sttKeyInfo.status = STT_FILE_READER_UNINIT;
if (p->iter.iter != NULL) { if (p->iter.iter != NULL) {
p->iter.iter = tsdbTbDataIterDestroy(p->iter.iter); p->iter.iter = tsdbTbDataIterDestroy(p->iter.iter);

View File

@ -96,7 +96,7 @@ typedef struct SResultBlockInfo {
int64_t capacity; int64_t capacity;
} SResultBlockInfo; } SResultBlockInfo;
typedef struct SCostSummary { typedef struct SReadCostSummary {
int64_t numOfBlocks; int64_t numOfBlocks;
double blockLoadTime; double blockLoadTime;
double buildmemBlock; double buildmemBlock;
@ -110,7 +110,7 @@ typedef struct SCostSummary {
double createScanInfoList; double createScanInfoList;
double createSkylineIterTime; double createSkylineIterTime;
double initLastBlockReader; double initLastBlockReader;
} SCostSummary; } SReadCostSummary;
typedef struct STableUidList { typedef struct STableUidList {
uint64_t* tableUidList; // access table uid list in uid ascending order list uint64_t* tableUidList; // access table uid list in uid ascending order list
@ -122,12 +122,6 @@ typedef struct {
int32_t numOfSttFiles; int32_t numOfSttFiles;
} SBlockNumber; } SBlockNumber;
typedef struct SBlockIndex {
int32_t ordinalIndex;
int64_t inFileOffset;
STimeWindow window; // todo replace it with overlap flag.
} SBlockIndex;
typedef struct SBlockOrderWrapper { typedef struct SBlockOrderWrapper {
int64_t uid; int64_t uid;
int64_t offset; int64_t offset;
@ -192,6 +186,7 @@ typedef struct SFileBlockDumpInfo {
} SFileBlockDumpInfo; } SFileBlockDumpInfo;
typedef struct SReaderStatus { typedef struct SReaderStatus {
bool suspendInvoked;
bool loadFromFile; // check file stage bool loadFromFile; // check file stage
bool composedDataBlock; // the returned data block is a composed block or not bool composedDataBlock; // the returned data block is a composed block or not
SSHashObj* pTableMap; // SHash<STableBlockScanInfo> SSHashObj* pTableMap; // SHash<STableBlockScanInfo>
@ -220,7 +215,8 @@ struct STsdbReader {
int32_t type; // query type: 1. retrieve all data blocks, 2. retrieve direct prev|next rows int32_t type; // query type: 1. retrieve all data blocks, 2. retrieve direct prev|next rows
SBlockLoadSuppInfo suppInfo; SBlockLoadSuppInfo suppInfo;
STsdbReadSnap* pReadSnap; STsdbReadSnap* pReadSnap;
SCostSummary cost; tsem_t resumeAfterSuspend;
SReadCostSummary cost;
SHashObj** pIgnoreTables; SHashObj** pIgnoreTables;
SSHashObj* pSchemaMap; // keep the retrieved schema info, to avoid the overhead by repeatly load schema SSHashObj* pSchemaMap; // keep the retrieved schema info, to avoid the overhead by repeatly load schema
SDataFileReader* pFileReader; // the file reader SDataFileReader* pFileReader; // the file reader

View File

@ -249,7 +249,7 @@ _exit:
if (code) { if (code) {
TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code);
} else { } else {
tsdbInfo("vid:%d, cid:%" PRId64 ", %s done", TD_VID(rtner->tsdb->pVnode), rtner->cid, __func__); tsdbDebug("vid:%d, cid:%" PRId64 ", %s done", TD_VID(rtner->tsdb->pVnode), rtner->cid, __func__);
} }
return code; return code;
} }
@ -279,7 +279,7 @@ _exit:
if (code) { if (code) {
TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code);
} else { } else {
tsdbInfo("vid:%d, cid:%" PRId64 ", %s done", TD_VID(rtner->tsdb->pVnode), rtner->cid, __func__); tsdbDebug("vid:%d, cid:%" PRId64 ", %s done", TD_VID(rtner->tsdb->pVnode), rtner->cid, __func__);
} }
tsdbFSDestroyCopySnapshot(&rtner->fsetArr); tsdbFSDestroyCopySnapshot(&rtner->fsetArr);
return code; return code;
@ -391,32 +391,6 @@ _exit:
static void tsdbFreeRtnArg(void *arg) { taosMemoryFree(arg); } static void tsdbFreeRtnArg(void *arg) { taosMemoryFree(arg); }
static int32_t tsdbDoRetentionSync(void *arg) {
int32_t code = 0;
int32_t lino = 0;
SRTNer rtner[1] = {0};
code = tsdbDoRetentionBegin(arg, rtner);
TSDB_CHECK_CODE(code, lino, _exit);
STFileSet *fset;
TARRAY2_FOREACH(rtner->fsetArr, fset) {
code = tsdbDoRetentionOnFileSet(rtner, fset);
TSDB_CHECK_CODE(code, lino, _exit);
}
code = tsdbDoRetentionEnd(rtner);
TSDB_CHECK_CODE(code, lino, _exit);
_exit:
if (code) {
TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code);
}
tsem_post(&((SRtnArg *)arg)->tsdb->pVnode->canCommit);
tsdbFreeRtnArg(arg);
return code;
}
static int32_t tsdbDoRetentionAsync(void *arg) { static int32_t tsdbDoRetentionAsync(void *arg) {
int32_t code = 0; int32_t code = 0;
int32_t lino = 0; int32_t lino = 0;
@ -454,49 +428,41 @@ _exit:
 int32_t tsdbRetention(STsdb *tsdb, int64_t now, int32_t sync) {
   int32_t code = 0;

-  if (sync) {  // sync retention
+  taosThreadMutexLock(&tsdb->mutex);
+
+  STFileSet *fset;
+  TARRAY2_FOREACH(tsdb->pFS->fSetArr, fset) {
+    code = tsdbTFileSetOpenChannel(fset);
+    if (code) {
+      taosThreadMutexUnlock(&tsdb->mutex);
+      return code;
+    }
+
     SRtnArg *arg = taosMemoryMalloc(sizeof(*arg));
     if (arg == NULL) {
+      taosThreadMutexUnlock(&tsdb->mutex);
       return TSDB_CODE_OUT_OF_MEMORY;
     }

     arg->tsdb = tsdb;
     arg->now = now;
-    arg->fid = INT32_MAX;
-    tsem_wait(&tsdb->pVnode->canCommit);
-    code = vnodeScheduleTask(tsdbDoRetentionSync, arg);
+    arg->fid = fset->fid;
+
+    if (sync) {
+      code = vnodeAsyncC(vnodeAsyncHandle[0], tsdb->pVnode->commitChannel, EVA_PRIORITY_LOW, tsdbDoRetentionAsync,
+                         tsdbFreeRtnArg, arg, NULL);
+    } else {
+      code = vnodeAsyncC(vnodeAsyncHandle[1], fset->bgTaskChannel, EVA_PRIORITY_LOW, tsdbDoRetentionAsync,
+                         tsdbFreeRtnArg, arg, NULL);
+    }
+
     if (code) {
-      tsem_post(&tsdb->pVnode->canCommit);
-      taosMemoryFree(arg);
+      tsdbFreeRtnArg(arg);
+      taosThreadMutexUnlock(&tsdb->mutex);
       return code;
     }
-  } else {  // async retention
-    taosThreadMutexLock(&tsdb->mutex);
-
-    STFileSet *fset;
-    TARRAY2_FOREACH(tsdb->pFS->fSetArr, fset) {
-      SRtnArg *arg = taosMemoryMalloc(sizeof(*arg));
-      if (arg == NULL) {
-        taosThreadMutexUnlock(&tsdb->mutex);
-        return TSDB_CODE_OUT_OF_MEMORY;
-      }
-
-      arg->tsdb = tsdb;
-      arg->now = now;
-      arg->fid = fset->fid;
-
-      code = tsdbFSScheduleBgTask(tsdb->pFS, fset->fid, TSDB_BG_TASK_RETENTION, tsdbDoRetentionAsync, tsdbFreeRtnArg,
-                                  arg, NULL);
-      if (code) {
-        tsdbFreeRtnArg(arg);
-        taosThreadMutexUnlock(&tsdb->mutex);
-        return code;
-      }
-    }
-
-    taosThreadMutexUnlock(&tsdb->mutex);
   }
+  taosThreadMutexUnlock(&tsdb->mutex);

   return code;
 }
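Design note: the old path serialized synchronous retention with commits through the vnode's canCommit semaphore (tsem_wait before scheduling, tsem_post inside the removed tsdbDoRetentionSync). After this change both branches go through vnodeAsyncC(), and the sync case relies on per-channel ordering of what appears to be the commit channel (vnodeAsyncHandle[0], pVnode->commitChannel) to keep retention from overlapping a commit. If a caller ever truly had to block, a hedged sketch of an explicit wait (illustrative wrapper; it would have to live in tsdbRetention.c where SRtnArg and tsdbDoRetentionAsync are visible) could look like this:

// Hedged sketch: schedule retention on the commit channel and block until it has run.
static int32_t exampleRetentionAndWait(STsdb *tsdb, SRtnArg *arg) {
  int64_t taskId = 0;
  int32_t code = vnodeAsyncC(vnodeAsyncHandle[0], tsdb->pVnode->commitChannel, EVA_PRIORITY_LOW,
                             tsdbDoRetentionAsync, tsdbFreeRtnArg, arg, &taskId);
  if (code) {
    tsdbFreeRtnArg(arg);  // completion callback does not fire on a failed submission
    return code;
  }
  return vnodeAWait(vnodeAsyncHandle[0], taskId);  // only if the caller truly must wait
}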

View File

@ -1032,9 +1032,6 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, void* pRang
int32_t code = 0; int32_t code = 0;
int32_t lino = 0; int32_t lino = 0;
// disable background tasks
tsdbFSDisableBgTask(pTsdb->pFS);
// start to write // start to write
writer[0] = taosMemoryCalloc(1, sizeof(*writer[0])); writer[0] = taosMemoryCalloc(1, sizeof(*writer[0]));
if (writer[0] == NULL) return TSDB_CODE_OUT_OF_MEMORY; if (writer[0] == NULL) return TSDB_CODE_OUT_OF_MEMORY;
@ -1107,7 +1104,6 @@ int32_t tsdbSnapWriterClose(STsdbSnapWriter** writer, int8_t rollback) {
taosThreadMutexUnlock(&writer[0]->tsdb->mutex); taosThreadMutexUnlock(&writer[0]->tsdb->mutex);
} }
tsdbFSEnableBgTask(tsdb->pFS);
tsdbIterMergerClose(&writer[0]->ctx->tombIterMerger); tsdbIterMergerClose(&writer[0]->ctx->tombIterMerger);
tsdbIterMergerClose(&writer[0]->ctx->dataIterMerger); tsdbIterMergerClose(&writer[0]->ctx->dataIterMerger);
@ -1595,3 +1591,6 @@ _out:
return code; return code;
} }
extern int32_t tsdbFSCancelAllBgTask(STFileSystem* fs);
int32_t tsdbCancelAllBgTask(STsdb* tsdb) { return tsdbFSCancelAllBgTask(tsdb->pFS); }

View File

@ -0,0 +1,719 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "vnd.h"
#include "vnodeHash.h"
typedef struct SVATask SVATask;
typedef struct SVAChannel SVAChannel;
#define VNODE_ASYNC_DEFAULT_WORKERS 4
#define VNODE_ASYNC_MAX_WORKERS 256
// priority
#define EVA_PRIORITY_MAX (EVA_PRIORITY_LOW + 1)
// worker
typedef enum {
EVA_WORKER_STATE_UINIT = 0,
EVA_WORKER_STATE_ACTIVE,
EVA_WORKER_STATE_IDLE,
EVA_WORKER_STATE_STOP,
} EVWorkerState;
typedef struct {
SVAsync *async;
int32_t workerId;
EVWorkerState state;
TdThread thread;
SVATask *runningTask;
} SVWorker;
// task
typedef enum {
EVA_TASK_STATE_WAITTING = 0,
EVA_TASK_STATE_RUNNING,
} EVATaskState;
struct SVATask {
int64_t taskId;
EVAPriority priority;
int32_t priorScore;
SVAChannel *channel;
int32_t (*execute)(void *);
void (*complete)(void *);
void *arg;
EVATaskState state;
// wait
int32_t numWait;
TdThreadCond waitCond;
// queue
struct SVATask *prev;
struct SVATask *next;
};
#define VATASK_PIORITY(task_) ((task_)->priority - ((task_)->priorScore / 4))
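// Effective priority: a task's static priority is promoted by one level (toward index 0,
// which workers scan first) for every 4 times it has been passed over, because priorScore
// is incremented whenever a worker or a channel picks some other task ahead of it. This
// aging keeps long-waiting low-priority tasks from starving behind high-priority work.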
// async channel
typedef enum {
EVA_CHANNEL_STATE_OPEN = 0,
EVA_CHANNEL_STATE_CLOSE,
} EVAChannelState;
struct SVAChannel {
int64_t channelId;
EVAChannelState state;
SVATask queue[EVA_PRIORITY_MAX];
SVATask *scheduled;
SVAChannel *prev;
SVAChannel *next;
};
// async handle
struct SVAsync {
const char *label;
TdThreadMutex mutex;
TdThreadCond hasTask;
bool stop;
// worker
int32_t numWorkers;
int32_t numLaunchWorkers;
int32_t numIdleWorkers;
SVWorker workers[VNODE_ASYNC_MAX_WORKERS];
// channel
int64_t nextChannelId;
int32_t numChannels;
SVAChannel chList;
SVHashTable *channelTable;
// task
int64_t nextTaskId;
int32_t numTasks;
SVATask queue[EVA_PRIORITY_MAX];
SVHashTable *taskTable;
};
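// Layout notes: each queue[] entry (both here and per channel) is the sentinel node of a
// circular doubly-linked list, one list per priority level; chList is the sentinel of the
// open-channel list. taskTable and channelTable are hash tables keyed by the monotonically
// increasing taskId/channelId, so vnodeAWait/vnodeACancel and channel-scoped submission can
// look objects up without scanning the lists.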
static int32_t vnodeAsyncTaskDone(SVAsync *async, SVATask *task) {
int32_t ret;
if (task->channel != NULL && task->channel->scheduled == task) {
task->channel->scheduled = NULL;
if (task->channel->state == EVA_CHANNEL_STATE_CLOSE) {
taosMemoryFree(task->channel);
} else {
for (int32_t i = 0; i < EVA_PRIORITY_MAX; i++) {
SVATask *nextTask = task->channel->queue[i].next;
if (nextTask != &task->channel->queue[i]) {
if (task->channel->scheduled == NULL) {
task->channel->scheduled = nextTask;
nextTask->next->prev = nextTask->prev;
nextTask->prev->next = nextTask->next;
} else {
nextTask->priorScore++;
int32_t newPriority = VATASK_PIORITY(nextTask);
if (newPriority != i) {
// remove from current priority queue
nextTask->prev->next = nextTask->next;
nextTask->next->prev = nextTask->prev;
// add to new priority queue
nextTask->next = &task->channel->queue[newPriority];
nextTask->prev = task->channel->queue[newPriority].prev;
nextTask->next->prev = nextTask;
nextTask->prev->next = nextTask;
}
}
}
}
if (task->channel->scheduled != NULL) {
int32_t priority = VATASK_PIORITY(task->channel->scheduled);
task->channel->scheduled->next = &async->queue[priority];
task->channel->scheduled->prev = async->queue[priority].prev;
task->channel->scheduled->next->prev = task->channel->scheduled;
task->channel->scheduled->prev->next = task->channel->scheduled;
}
}
}
ret = vHashDrop(async->taskTable, task);
if (ret != 0) {
ASSERT(0);
}
async->numTasks--;
// call complete callback
if (task->complete) {
task->complete(task->arg);
}
if (task->numWait == 0) {
taosThreadCondDestroy(&task->waitCond);
taosMemoryFree(task);
} else if (task->numWait == 1) {
taosThreadCondSignal(&task->waitCond);
} else {
taosThreadCondBroadcast(&task->waitCond);
}
return 0;
}
static int32_t vnodeAsyncCancelAllTasks(SVAsync *async) {
for (int32_t i = 0; i < EVA_PRIORITY_MAX; i++) {
while (async->queue[i].next != &async->queue[i]) {
SVATask *task = async->queue[i].next;
task->prev->next = task->next;
task->next->prev = task->prev;
vnodeAsyncTaskDone(async, task);
}
}
return 0;
}
static void *vnodeAsyncLoop(void *arg) {
SVWorker *worker = (SVWorker *)arg;
SVAsync *async = worker->async;
setThreadName(async->label);
for (;;) {
taosThreadMutexLock(&async->mutex);
// finish last running task
if (worker->runningTask != NULL) {
vnodeAsyncTaskDone(async, worker->runningTask);
worker->runningTask = NULL;
}
for (;;) {
if (async->stop || worker->workerId >= async->numWorkers) {
if (async->stop) { // cancel all tasks
vnodeAsyncCancelAllTasks(async);
}
worker->state = EVA_WORKER_STATE_STOP;
async->numLaunchWorkers--;
taosThreadMutexUnlock(&async->mutex);
return NULL;
}
for (int32_t i = 0; i < EVA_PRIORITY_MAX; i++) {
SVATask *task = async->queue[i].next;
if (task != &async->queue[i]) {
if (worker->runningTask == NULL) {
worker->runningTask = task;
task->prev->next = task->next;
task->next->prev = task->prev;
} else { // promote priority
task->priorScore++;
int32_t priority = VATASK_PIORITY(task);
if (priority != i) {
// remove from current priority queue
task->prev->next = task->next;
task->next->prev = task->prev;
// add to new priority queue
task->next = &async->queue[priority];
task->prev = async->queue[priority].prev;
task->next->prev = task;
task->prev->next = task;
}
}
}
}
if (worker->runningTask == NULL) {
worker->state = EVA_WORKER_STATE_IDLE;
async->numIdleWorkers++;
taosThreadCondWait(&async->hasTask, &async->mutex);
async->numIdleWorkers--;
worker->state = EVA_WORKER_STATE_ACTIVE;
} else {
worker->runningTask->state = EVA_TASK_STATE_RUNNING;
break;
}
}
taosThreadMutexUnlock(&async->mutex);
// do run the task
worker->runningTask->execute(worker->runningTask->arg);
}
return NULL;
}
static uint32_t vnodeAsyncTaskHash(const void *obj) {
SVATask *task = (SVATask *)obj;
return MurmurHash3_32((const char *)(&task->taskId), sizeof(task->taskId));
}
static int32_t vnodeAsyncTaskCompare(const void *obj1, const void *obj2) {
SVATask *task1 = (SVATask *)obj1;
SVATask *task2 = (SVATask *)obj2;
if (task1->taskId < task2->taskId) {
return -1;
} else if (task1->taskId > task2->taskId) {
return 1;
}
return 0;
}
static uint32_t vnodeAsyncChannelHash(const void *obj) {
SVAChannel *channel = (SVAChannel *)obj;
return MurmurHash3_32((const char *)(&channel->channelId), sizeof(channel->channelId));
}
static int32_t vnodeAsyncChannelCompare(const void *obj1, const void *obj2) {
SVAChannel *channel1 = (SVAChannel *)obj1;
SVAChannel *channel2 = (SVAChannel *)obj2;
if (channel1->channelId < channel2->channelId) {
return -1;
} else if (channel1->channelId > channel2->channelId) {
return 1;
}
return 0;
}
int32_t vnodeAsyncInit(SVAsync **async, char *label) {
int32_t ret;
if (async == NULL) {
return TSDB_CODE_INVALID_PARA;
}
if (label == NULL) {
label = "anonymous";
}
(*async) = (SVAsync *)taosMemoryCalloc(1, sizeof(SVAsync) + strlen(label) + 1);
if ((*async) == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
strcpy((char *)((*async) + 1), label);
(*async)->label = (const char *)((*async) + 1);
taosThreadMutexInit(&(*async)->mutex, NULL);
taosThreadCondInit(&(*async)->hasTask, NULL);
(*async)->stop = false;
// worker
(*async)->numWorkers = VNODE_ASYNC_DEFAULT_WORKERS;
(*async)->numLaunchWorkers = 0;
(*async)->numIdleWorkers = 0;
for (int32_t i = 0; i < VNODE_ASYNC_MAX_WORKERS; i++) {
(*async)->workers[i].async = (*async);
(*async)->workers[i].workerId = i;
(*async)->workers[i].state = EVA_WORKER_STATE_UINIT;
(*async)->workers[i].runningTask = NULL;
}
// channel
(*async)->nextChannelId = 0;
(*async)->numChannels = 0;
(*async)->chList.prev = &(*async)->chList;
(*async)->chList.next = &(*async)->chList;
ret = vHashInit(&(*async)->channelTable, vnodeAsyncChannelHash, vnodeAsyncChannelCompare);
if (ret != 0) {
taosThreadMutexDestroy(&(*async)->mutex);
taosThreadCondDestroy(&(*async)->hasTask);
taosMemoryFree(*async);
return ret;
}
// task
(*async)->nextTaskId = 0;
(*async)->numTasks = 0;
for (int32_t i = 0; i < EVA_PRIORITY_MAX; i++) {
(*async)->queue[i].next = &(*async)->queue[i];
(*async)->queue[i].prev = &(*async)->queue[i];
}
ret = vHashInit(&(*async)->taskTable, vnodeAsyncTaskHash, vnodeAsyncTaskCompare);
if (ret != 0) {
vHashDestroy(&(*async)->channelTable);
taosThreadMutexDestroy(&(*async)->mutex);
taosThreadCondDestroy(&(*async)->hasTask);
taosMemoryFree(*async);
return ret;
}
return 0;
}
int32_t vnodeAsyncDestroy(SVAsync **async) {
if ((*async) == NULL) {
return TSDB_CODE_INVALID_PARA;
}
// set stop and broadcast
taosThreadMutexLock(&(*async)->mutex);
(*async)->stop = true;
taosThreadCondBroadcast(&(*async)->hasTask);
taosThreadMutexUnlock(&(*async)->mutex);
// join all workers
for (int32_t i = 0; i < VNODE_ASYNC_MAX_WORKERS; i++) {
taosThreadMutexLock(&(*async)->mutex);
EVWorkerState state = (*async)->workers[i].state;
taosThreadMutexUnlock(&(*async)->mutex);
if (state == EVA_WORKER_STATE_UINIT) {
continue;
}
taosThreadJoin((*async)->workers[i].thread, NULL);
ASSERT((*async)->workers[i].state == EVA_WORKER_STATE_STOP);
(*async)->workers[i].state = EVA_WORKER_STATE_UINIT;
}
// close all channels
for (SVAChannel *channel = (*async)->chList.next; channel != &(*async)->chList; channel = (*async)->chList.next) {
channel->next->prev = channel->prev;
channel->prev->next = channel->next;
int32_t ret = vHashDrop((*async)->channelTable, channel);
if (ret) {
ASSERT(0);
}
(*async)->numChannels--;
taosMemoryFree(channel);
}
ASSERT((*async)->numLaunchWorkers == 0);
ASSERT((*async)->numIdleWorkers == 0);
ASSERT((*async)->numChannels == 0);
ASSERT((*async)->numTasks == 0);
taosThreadMutexDestroy(&(*async)->mutex);
taosThreadCondDestroy(&(*async)->hasTask);
vHashDestroy(&(*async)->channelTable);
vHashDestroy(&(*async)->taskTable);
taosMemoryFree(*async);
*async = NULL;
return 0;
}
static int32_t vnodeAsyncLaunchWorker(SVAsync *async) {
for (int32_t i = 0; i < async->numWorkers; i++) {
ASSERT(async->workers[i].state != EVA_WORKER_STATE_IDLE);
if (async->workers[i].state == EVA_WORKER_STATE_ACTIVE) {
continue;
} else if (async->workers[i].state == EVA_WORKER_STATE_STOP) {
taosThreadJoin(async->workers[i].thread, NULL);
async->workers[i].state = EVA_WORKER_STATE_UINIT;
}
taosThreadCreate(&async->workers[i].thread, NULL, vnodeAsyncLoop, &async->workers[i]);
async->workers[i].state = EVA_WORKER_STATE_ACTIVE;
async->numLaunchWorkers++;
break;
}
return 0;
}
int32_t vnodeAsync(SVAsync *async, EVAPriority priority, int32_t (*execute)(void *), void (*complete)(void *),
void *arg, int64_t *taskId) {
return vnodeAsyncC(async, 0, priority, execute, complete, arg, taskId);
}
int32_t vnodeAsyncC(SVAsync *async, int64_t channelId, EVAPriority priority, int32_t (*execute)(void *),
void (*complete)(void *), void *arg, int64_t *taskId) {
if (async == NULL || execute == NULL || channelId < 0) {
return TSDB_CODE_INVALID_PARA;
}
int64_t id;
// create task object
SVATask *task = (SVATask *)taosMemoryCalloc(1, sizeof(SVATask));
if (task == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
task->priority = priority;
task->priorScore = 0;
task->execute = execute;
task->complete = complete;
task->arg = arg;
task->state = EVA_TASK_STATE_WAITTING;
task->numWait = 0;
taosThreadCondInit(&task->waitCond, NULL);
// schedule task
taosThreadMutexLock(&async->mutex);
if (channelId == 0) {
task->channel = NULL;
} else {
SVAChannel channel = {.channelId = channelId};
vHashGet(async->channelTable, &channel, (void **)&task->channel);
if (task->channel == NULL) {
taosThreadMutexUnlock(&async->mutex);
taosThreadCondDestroy(&task->waitCond);
taosMemoryFree(task);
return TSDB_CODE_INVALID_PARA;
}
}
task->taskId = id = ++async->nextTaskId;
// add task to hash table
int32_t ret = vHashPut(async->taskTable, task);
if (ret != 0) {
taosThreadMutexUnlock(&async->mutex);
taosThreadCondDestroy(&task->waitCond);
taosMemoryFree(task);
return ret;
}
async->numTasks++;
// add task to queue
if (task->channel == NULL || task->channel->scheduled == NULL) {
// add task to async->queue
if (task->channel) {
task->channel->scheduled = task;
}
task->next = &async->queue[priority];
task->prev = async->queue[priority].prev;
task->next->prev = task;
task->prev->next = task;
// signal worker or launch new worker
if (async->numIdleWorkers > 0) {
taosThreadCondSignal(&(async->hasTask));
} else if (async->numLaunchWorkers < async->numWorkers) {
vnodeAsyncLaunchWorker(async);
}
} else if (task->channel->scheduled->state == EVA_TASK_STATE_RUNNING ||
priority >= VATASK_PIORITY(task->channel->scheduled)) {
// add task to task->channel->queue
task->next = &task->channel->queue[priority];
task->prev = task->channel->queue[priority].prev;
task->next->prev = task;
task->prev->next = task;
} else {
// remove task->channel->scheduled from queue
task->channel->scheduled->prev->next = task->channel->scheduled->next;
task->channel->scheduled->next->prev = task->channel->scheduled->prev;
// promote priority and add task->channel->scheduled to task->channel->queue
task->channel->scheduled->priorScore++;
int32_t newPriority = VATASK_PIORITY(task->channel->scheduled);
task->channel->scheduled->next = &task->channel->queue[newPriority];
task->channel->scheduled->prev = task->channel->queue[newPriority].prev;
task->channel->scheduled->next->prev = task->channel->scheduled;
task->channel->scheduled->prev->next = task->channel->scheduled;
// add task to queue
task->channel->scheduled = task;
task->next = &async->queue[priority];
task->prev = async->queue[priority].prev;
task->next->prev = task;
task->prev->next = task;
}
taosThreadMutexUnlock(&async->mutex);
if (taskId != NULL) {
*taskId = id;
}
return 0;
}
int32_t vnodeAWait(SVAsync *async, int64_t taskId) {
if (async == NULL || taskId <= 0) {
return TSDB_CODE_INVALID_PARA;
}
SVATask *task = NULL;
SVATask task2 = {.taskId = taskId};
taosThreadMutexLock(&async->mutex);
vHashGet(async->taskTable, &task2, (void **)&task);
if (task) {
task->numWait++;
taosThreadCondWait(&task->waitCond, &async->mutex);
task->numWait--;
if (task->numWait == 0) {
taosThreadCondDestroy(&task->waitCond);
taosMemoryFree(task);
}
}
taosThreadMutexUnlock(&async->mutex);
return 0;
}
int32_t vnodeACancel(SVAsync *async, int64_t taskId) {
if (async == NULL) {
return TSDB_CODE_INVALID_PARA;
}
int32_t ret = 0;
SVATask *task = NULL;
SVATask task2 = {.taskId = taskId};
taosThreadMutexLock(&async->mutex);
vHashGet(async->taskTable, &task2, (void **)&task);
if (task) {
if (task->state == EVA_TASK_STATE_WAITTING) {
// remove from queue
task->next->prev = task->prev;
task->prev->next = task->next;
vnodeAsyncTaskDone(async, task);
} else {
ret = 0;  // task is already running; should this return TSDB_CODE_BUSY instead?
}
}
taosThreadMutexUnlock(&async->mutex);
return ret;
}
int32_t vnodeAsyncSetWorkers(SVAsync *async, int32_t numWorkers) {
if (async == NULL || numWorkers <= 0 || numWorkers > VNODE_ASYNC_MAX_WORKERS) {
return TSDB_CODE_INVALID_PARA;
}
taosThreadMutexLock(&async->mutex);
async->numWorkers = numWorkers;
if (async->numIdleWorkers > 0) {
taosThreadCondBroadcast(&async->hasTask);
}
taosThreadMutexUnlock(&async->mutex);
return 0;
}
int32_t vnodeAChannelInit(SVAsync *async, int64_t *channelId) {
if (async == NULL || channelId == NULL) {
return TSDB_CODE_INVALID_PARA;
}
// create channel object
SVAChannel *channel = (SVAChannel *)taosMemoryMalloc(sizeof(SVAChannel));
if (channel == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
channel->state = EVA_CHANNEL_STATE_OPEN;
for (int32_t i = 0; i < EVA_PRIORITY_MAX; i++) {
channel->queue[i].next = &channel->queue[i];
channel->queue[i].prev = &channel->queue[i];
}
channel->scheduled = NULL;
// register channel
taosThreadMutexLock(&async->mutex);
channel->channelId = *channelId = ++async->nextChannelId;
// add to hash table
int32_t ret = vHashPut(async->channelTable, channel);
if (ret != 0) {
taosThreadMutexUnlock(&async->mutex);
taosMemoryFree(channel);
return ret;
}
// add to list
channel->next = &async->chList;
channel->prev = async->chList.prev;
channel->next->prev = channel;
channel->prev->next = channel;
async->numChannels++;
taosThreadMutexUnlock(&async->mutex);
return 0;
}
int32_t vnodeAChannelDestroy(SVAsync *async, int64_t channelId, bool waitRunning) {
if (async == NULL || channelId <= 0) {
return TSDB_CODE_INVALID_PARA;
}
SVAChannel *channel = NULL;
SVAChannel channel2 = {.channelId = channelId};
taosThreadMutexLock(&async->mutex);
vHashGet(async->channelTable, &channel2, (void **)&channel);
if (channel) {
// unregister channel
channel->next->prev = channel->prev;
channel->prev->next = channel->next;
vHashDrop(async->channelTable, channel);
async->numChannels--;
// cancel all waiting tasks
for (int32_t i = 0; i < EVA_PRIORITY_MAX; i++) {
while (channel->queue[i].next != &channel->queue[i]) {
SVATask *task = channel->queue[i].next;
task->prev->next = task->next;
task->next->prev = task->prev;
vnodeAsyncTaskDone(async, task);
}
}
// cancel or wait the scheduled task
if (channel->scheduled == NULL || channel->scheduled->state == EVA_TASK_STATE_WAITTING) {
if (channel->scheduled) {
channel->scheduled->prev->next = channel->scheduled->next;
channel->scheduled->next->prev = channel->scheduled->prev;
vnodeAsyncTaskDone(async, channel->scheduled);
}
taosMemoryFree(channel);
} else {
if (waitRunning) {
// wait task
SVATask *task = channel->scheduled;
task->numWait++;
taosThreadCondWait(&task->waitCond, &async->mutex);
task->numWait--;
if (task->numWait == 0) {
taosThreadCondDestroy(&task->waitCond);
taosMemoryFree(task);
}
taosMemoryFree(channel);
} else {
channel->state = EVA_CHANNEL_STATE_CLOSE;
}
}
} else {
taosThreadMutexUnlock(&async->mutex);
return TSDB_CODE_INVALID_PARA;
}
taosThreadMutexUnlock(&async->mutex);
return 0;
}
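
For orientation, here is a minimal caller-side sketch of the async API above: create a channel, schedule a task on it, wait for it, then tear the channel down. vnodeAsyncInit, vnodeAsyncSetWorkers, vnodeAsyncDestroy and EVA_PRIORITY_HIGH are taken from elsewhere in this change set; the task body demoTask, its completion callback and the surrounding error handling are illustrative assumptions, not part of the commit.

// --- illustrative usage sketch (editor addition, not part of the commit) ---
#include "vnd.h"

// hypothetical task body, executed on one of the SVAsync worker threads
static int32_t demoTask(void *arg) {
  int64_t *pCounter = (int64_t *)arg;
  (*pCounter)++;
  return 0;
}

// hypothetical completion callback, invoked once demoTask has finished
static void demoDone(void *arg) { (void)arg; }

static int32_t demoSchedule(void) {
  SVAsync *async = NULL;
  int64_t  channelId = 0;
  int64_t  taskId = 0;
  int64_t  counter = 0;

  vnodeAsyncInit(&async, "demo-async");  // create the async handle with a worker-thread name
  vnodeAsyncSetWorkers(async, 2);        // allow up to 2 worker threads

  // tasks scheduled on the same channel run one at a time, in scheduling order;
  // passing channelId == 0 to vnodeAsyncC would bypass channels entirely
  int32_t code = vnodeAChannelInit(async, &channelId);
  if (code == 0) {
    code = vnodeAsyncC(async, channelId, EVA_PRIORITY_HIGH, demoTask, demoDone, &counter, &taskId);
    if (code == 0) {
      vnodeAWait(async, taskId);  // block until demoTask has completed
    }
    vnodeAChannelDestroy(async, channelId, true /* wait for a running task */);
  }

  vnodeAsyncDestroy(&async);
  return code;
}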

View File

@ -203,10 +203,8 @@ int vnodeSaveInfo(const char *dir, const SVnodeInfo *pInfo) {
// free info binary // free info binary
taosMemoryFree(data); taosMemoryFree(data);
vInfo("vgId:%d, vnode info is saved, fname:%s replica:%d selfIndex:%d changeVersion:%d", vInfo("vgId:%d, vnode info is saved, fname:%s replica:%d selfIndex:%d changeVersion:%d", pInfo->config.vgId, fname,
pInfo->config.vgId, fname, pInfo->config.syncCfg.replicaNum, pInfo->config.syncCfg.myIndex, pInfo->config.syncCfg.changeVersion);
pInfo->config.syncCfg.replicaNum, pInfo->config.syncCfg.myIndex,
pInfo->config.syncCfg.changeVersion);
return 0; return 0;
@ -289,9 +287,10 @@ static int32_t vnodePrepareCommit(SVnode *pVnode, SCommitInfo *pInfo) {
char dir[TSDB_FILENAME_LEN] = {0}; char dir[TSDB_FILENAME_LEN] = {0};
int64_t lastCommitted = pInfo->info.state.committed; int64_t lastCommitted = pInfo->info.state.committed;
tsem_wait(&pVnode->canCommit); // wait last commit task
vnodeAWait(vnodeAsyncHandle[0], pVnode->commitTask);
if(syncNodeGetConfig(pVnode->sync, &pVnode->config.syncCfg) != 0) goto _exit; if (syncNodeGetConfig(pVnode->sync, &pVnode->config.syncCfg) != 0) goto _exit;
pVnode->state.commitTerm = pVnode->state.applyTerm; pVnode->state.commitTerm = pVnode->state.applyTerm;
@ -379,12 +378,11 @@ static int32_t vnodeCommitTask(void *arg) {
vnodeReturnBufPool(pVnode); vnodeReturnBufPool(pVnode);
_exit: _exit:
// end commit
tsem_post(&pVnode->canCommit);
taosMemoryFree(pInfo);
return code; return code;
} }
static void vnodeCompleteCommit(void *arg) { taosMemoryFree(arg); }
int vnodeAsyncCommit(SVnode *pVnode) { int vnodeAsyncCommit(SVnode *pVnode) {
int32_t code = 0; int32_t code = 0;
@ -401,14 +399,14 @@ int vnodeAsyncCommit(SVnode *pVnode) {
} }
// schedule the task // schedule the task
code = vnodeScheduleTask(vnodeCommitTask, pInfo); code = vnodeAsyncC(vnodeAsyncHandle[0], pVnode->commitChannel, EVA_PRIORITY_HIGH, vnodeCommitTask,
vnodeCompleteCommit, pInfo, &pVnode->commitTask);
_exit: _exit:
if (code) { if (code) {
if (NULL != pInfo) { if (NULL != pInfo) {
taosMemoryFree(pInfo); taosMemoryFree(pInfo);
} }
tsem_post(&pVnode->canCommit);
vError("vgId:%d, %s failed since %s, commit id:%" PRId64, TD_VID(pVnode), __func__, tstrerror(code), vError("vgId:%d, %s failed since %s, commit id:%" PRId64, TD_VID(pVnode), __func__, tstrerror(code),
pVnode->state.commitID); pVnode->state.commitID);
} else { } else {
@ -420,8 +418,7 @@ _exit:
int vnodeSyncCommit(SVnode *pVnode) { int vnodeSyncCommit(SVnode *pVnode) {
vnodeAsyncCommit(pVnode); vnodeAsyncCommit(pVnode);
tsem_wait(&pVnode->canCommit); vnodeAWait(vnodeAsyncHandle[0], pVnode->commitTask);
tsem_post(&pVnode->canCommit);
return 0; return 0;
} }
@ -501,7 +498,7 @@ _exit:
} }
bool vnodeShouldRollback(SVnode *pVnode) { bool vnodeShouldRollback(SVnode *pVnode) {
char tFName[TSDB_FILENAME_LEN] = {0}; char tFName[TSDB_FILENAME_LEN] = {0};
int32_t offset = 0; int32_t offset = 0;
vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, tFName, TSDB_FILENAME_LEN); vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, tFName, TSDB_FILENAME_LEN);
@ -512,7 +509,7 @@ bool vnodeShouldRollback(SVnode *pVnode) {
} }
void vnodeRollback(SVnode *pVnode) { void vnodeRollback(SVnode *pVnode) {
char tFName[TSDB_FILENAME_LEN] = {0}; char tFName[TSDB_FILENAME_LEN] = {0};
int32_t offset = 0; int32_t offset = 0;
vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, tFName, TSDB_FILENAME_LEN); vnodeGetPrimaryDir(pVnode->path, pVnode->diskPrimary, pVnode->pTfs, tFName, TSDB_FILENAME_LEN);

View File

@ -0,0 +1,162 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "vnodeHash.h"
#define VNODE_HASH_DEFAULT_NUM_BUCKETS 1024
typedef struct SVHashEntry SVHashEntry;
struct SVHashEntry {
SVHashEntry* next;
void* obj;
};
struct SVHashTable {
uint32_t (*hash)(const void*);
int32_t (*compare)(const void*, const void*);
int32_t numEntries;
uint32_t numBuckets;
SVHashEntry** buckets;
};
static int32_t vHashRehash(SVHashTable* ht, uint32_t newNumBuckets) {
SVHashEntry** newBuckets = (SVHashEntry**)taosMemoryCalloc(newNumBuckets, sizeof(SVHashEntry*));
if (newBuckets == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < ht->numBuckets; i++) {
SVHashEntry* entry = ht->buckets[i];
while (entry != NULL) {
SVHashEntry* next = entry->next;
uint32_t bucketIndex = ht->hash(entry->obj) % newNumBuckets;
entry->next = newBuckets[bucketIndex];
newBuckets[bucketIndex] = entry;
entry = next;
}
}
taosMemoryFree(ht->buckets);
ht->buckets = newBuckets;
ht->numBuckets = newNumBuckets;
return 0;
}
int32_t vHashInit(SVHashTable** ht, uint32_t (*hash)(const void*), int32_t (*compare)(const void*, const void*)) {
if (ht == NULL || hash == NULL || compare == NULL) {
return TSDB_CODE_INVALID_PARA;
}
(*ht) = (SVHashTable*)taosMemoryMalloc(sizeof(SVHashTable));
if (*ht == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
(*ht)->hash = hash;
(*ht)->compare = compare;
(*ht)->numEntries = 0;
(*ht)->numBuckets = VNODE_HASH_DEFAULT_NUM_BUCKETS;
(*ht)->buckets = (SVHashEntry**)taosMemoryCalloc((*ht)->numBuckets, sizeof(SVHashEntry*));
if ((*ht)->buckets == NULL) {
taosMemoryFree(*ht);
return TSDB_CODE_OUT_OF_MEMORY;
}
return 0;
}
int32_t vHashDestroy(SVHashTable** ht) {
if (ht == NULL) {
return TSDB_CODE_INVALID_PARA;
}
if (*ht) {
ASSERT((*ht)->numEntries == 0);
taosMemoryFree((*ht)->buckets);
taosMemoryFree(*ht);
(*ht) = NULL;
}
return 0;
}
int32_t vHashPut(SVHashTable* ht, void* obj) {
if (ht == NULL || obj == NULL) {
return TSDB_CODE_INVALID_PARA;
}
uint32_t bucketIndex = ht->hash(obj) % ht->numBuckets;
for (SVHashEntry* entry = ht->buckets[bucketIndex]; entry != NULL; entry = entry->next) {
if (ht->compare(entry->obj, obj) == 0) {
return TSDB_CODE_DUP_KEY;
}
}
if (ht->numEntries >= ht->numBuckets) {
vHashRehash(ht, ht->numBuckets * 2);
bucketIndex = ht->hash(obj) % ht->numBuckets;
}
SVHashEntry* entry = (SVHashEntry*)taosMemoryMalloc(sizeof(SVHashEntry));
if (entry == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
entry->obj = obj;
entry->next = ht->buckets[bucketIndex];
ht->buckets[bucketIndex] = entry;
ht->numEntries++;
return 0;
}
int32_t vHashGet(SVHashTable* ht, const void* obj, void** retObj) {
if (ht == NULL || obj == NULL || retObj == NULL) {
return TSDB_CODE_INVALID_PARA;
}
uint32_t bucketIndex = ht->hash(obj) % ht->numBuckets;
for (SVHashEntry* entry = ht->buckets[bucketIndex]; entry != NULL; entry = entry->next) {
if (ht->compare(entry->obj, obj) == 0) {
*retObj = entry->obj;
return 0;
}
}
*retObj = NULL;
return TSDB_CODE_NOT_FOUND;
}
int32_t vHashDrop(SVHashTable* ht, const void* obj) {
if (ht == NULL || obj == NULL) {
return TSDB_CODE_INVALID_PARA;
}
uint32_t bucketIndex = ht->hash(obj) % ht->numBuckets;
for (SVHashEntry** entry = &ht->buckets[bucketIndex]; *entry != NULL; entry = &(*entry)->next) {
if (ht->compare((*entry)->obj, obj) == 0) {
SVHashEntry* tmp = *entry;
*entry = (*entry)->next;
taosMemoryFree(tmp);
ht->numEntries--;
if (ht->numBuckets > VNODE_HASH_DEFAULT_NUM_BUCKETS && ht->numEntries < ht->numBuckets / 4) {
vHashRehash(ht, ht->numBuckets / 2);
}
return 0;
}
}
return TSDB_CODE_NOT_FOUND;
}
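
A brief usage sketch of the hash table above, in the same spirit as how vnodeAsync.c keys its task and channel tables by a 64-bit id. The vHash* calls are the ones defined in this file; SDemoObj, demoHash and demoCompare are assumptions made for illustration only.

// --- illustrative usage sketch (editor addition, not part of the commit) ---
#include "vnodeHash.h"

// hypothetical object type keyed by an int64 id
typedef struct {
  int64_t id;
  // ... payload ...
} SDemoObj;

static uint32_t demoHash(const void *obj) { return (uint32_t)((const SDemoObj *)obj)->id; }

static int32_t demoCompare(const void *a, const void *b) {
  int64_t ia = ((const SDemoObj *)a)->id;
  int64_t ib = ((const SDemoObj *)b)->id;
  return (ia < ib) ? -1 : (ia > ib) ? 1 : 0;
}

static void demoUsage(void) {
  SVHashTable *ht = NULL;
  if (vHashInit(&ht, demoHash, demoCompare) != 0) return;

  SDemoObj obj = {.id = 42};
  vHashPut(ht, &obj);  // returns TSDB_CODE_DUP_KEY if the same key is inserted twice

  // lookups use a stack key object; only the fields read by hash/compare need to be set
  SDemoObj  key = {.id = 42};
  SDemoObj *found = NULL;
  if (vHashGet(ht, &key, (void **)&found) == 0) {
    // found now points at obj
  }

  vHashDrop(ht, &key);  // the table must be empty before destroy (asserted in vHashDestroy)
  vHashDestroy(&ht);
}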

View File

@ -0,0 +1,37 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _VNODE_HAS_H_
#define _VNODE_HAS_H_
#include "vnd.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct SVHashTable SVHashTable;
int32_t vHashInit(SVHashTable** ht, uint32_t (*hash)(const void*), int32_t (*compare)(const void*, const void*));
int32_t vHashDestroy(SVHashTable** ht);
int32_t vHashPut(SVHashTable* ht, void* obj);
int32_t vHashGet(SVHashTable* ht, const void* obj, void** retObj);
int32_t vHashDrop(SVHashTable* ht, const void* obj);
#ifdef __cplusplus
}
#endif
#endif /*_VNODE_HAS_H_*/

View File

@ -16,65 +16,25 @@
#include "cos.h" #include "cos.h"
#include "vnd.h" #include "vnd.h"
typedef struct SVnodeTask SVnodeTask; static volatile int32_t VINIT = 0;
struct SVnodeTask {
SVnodeTask* next;
SVnodeTask* prev;
int (*execute)(void*);
void* arg;
};
typedef struct { SVAsync* vnodeAsyncHandle[2];
int nthreads;
TdThread* threads;
TdThreadMutex mutex;
TdThreadCond hasTask;
SVnodeTask queue;
} SVnodeThreadPool;
struct SVnodeGlobal {
int8_t init;
int8_t stop;
SVnodeThreadPool tp[2];
};
struct SVnodeGlobal vnodeGlobal;
static void* loop(void* arg);
int vnodeInit(int nthreads) { int vnodeInit(int nthreads) {
int8_t init; int32_t init;
int ret;
init = atomic_val_compare_exchange_8(&(vnodeGlobal.init), 0, 1); init = atomic_val_compare_exchange_32(&VINIT, 0, 1);
if (init) { if (init) {
return 0; return 0;
} }
vnodeGlobal.stop = 0;
for (int32_t i = 0; i < ARRAY_SIZE(vnodeGlobal.tp); i++) { // vnode-commit
taosThreadMutexInit(&vnodeGlobal.tp[i].mutex, NULL); vnodeAsyncInit(&vnodeAsyncHandle[0], "vnode-commit");
taosThreadCondInit(&vnodeGlobal.tp[i].hasTask, NULL); vnodeAsyncSetWorkers(vnodeAsyncHandle[0], nthreads);
taosThreadMutexLock(&vnodeGlobal.tp[i].mutex); // vnode-merge
vnodeAsyncInit(&vnodeAsyncHandle[1], "vnode-merge");
vnodeGlobal.tp[i].queue.next = &vnodeGlobal.tp[i].queue; vnodeAsyncSetWorkers(vnodeAsyncHandle[1], nthreads);
vnodeGlobal.tp[i].queue.prev = &vnodeGlobal.tp[i].queue;
taosThreadMutexUnlock(&(vnodeGlobal.tp[i].mutex));
vnodeGlobal.tp[i].nthreads = nthreads;
vnodeGlobal.tp[i].threads = taosMemoryCalloc(nthreads, sizeof(TdThread));
if (vnodeGlobal.tp[i].threads == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
vError("failed to init vnode module since:%s", tstrerror(terrno));
return -1;
}
for (int j = 0; j < nthreads; j++) {
taosThreadCreate(&(vnodeGlobal.tp[i].threads[j]), NULL, loop, &vnodeGlobal.tp[i]);
}
}
if (walInit() < 0) { if (walInit() < 0) {
return -1; return -1;
@ -90,99 +50,15 @@ int vnodeInit(int nthreads) {
} }
void vnodeCleanup() { void vnodeCleanup() {
int8_t init; int32_t init = atomic_val_compare_exchange_32(&VINIT, 1, 0);
init = atomic_val_compare_exchange_8(&(vnodeGlobal.init), 1, 0);
if (init == 0) return; if (init == 0) return;
// set stop // set stop
vnodeGlobal.stop = 1; vnodeAsyncDestroy(&vnodeAsyncHandle[0]);
for (int32_t i = 0; i < ARRAY_SIZE(vnodeGlobal.tp); i++) { vnodeAsyncDestroy(&vnodeAsyncHandle[1]);
taosThreadMutexLock(&(vnodeGlobal.tp[i].mutex));
taosThreadCondBroadcast(&(vnodeGlobal.tp[i].hasTask));
taosThreadMutexUnlock(&(vnodeGlobal.tp[i].mutex));
// wait for threads
for (int j = 0; j < vnodeGlobal.tp[i].nthreads; j++) {
taosThreadJoin(vnodeGlobal.tp[i].threads[j], NULL);
}
// clear source
taosMemoryFreeClear(vnodeGlobal.tp[i].threads);
taosThreadCondDestroy(&(vnodeGlobal.tp[i].hasTask));
taosThreadMutexDestroy(&(vnodeGlobal.tp[i].mutex));
}
walCleanUp(); walCleanUp();
tqCleanUp(); tqCleanUp();
smaCleanUp(); smaCleanUp();
s3CleanUp(); s3CleanUp();
} }
int vnodeScheduleTaskEx(int tpid, int (*execute)(void*), void* arg) {
SVnodeTask* pTask;
ASSERT(!vnodeGlobal.stop);
pTask = taosMemoryMalloc(sizeof(*pTask));
if (pTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
pTask->execute = execute;
pTask->arg = arg;
taosThreadMutexLock(&(vnodeGlobal.tp[tpid].mutex));
pTask->next = &vnodeGlobal.tp[tpid].queue;
pTask->prev = vnodeGlobal.tp[tpid].queue.prev;
vnodeGlobal.tp[tpid].queue.prev->next = pTask;
vnodeGlobal.tp[tpid].queue.prev = pTask;
taosThreadCondSignal(&(vnodeGlobal.tp[tpid].hasTask));
taosThreadMutexUnlock(&(vnodeGlobal.tp[tpid].mutex));
return 0;
}
int vnodeScheduleTask(int (*execute)(void*), void* arg) { return vnodeScheduleTaskEx(0, execute, arg); }
/* ------------------------ STATIC METHODS ------------------------ */
static void* loop(void* arg) {
SVnodeThreadPool* tp = (SVnodeThreadPool*)arg;
SVnodeTask* pTask;
int ret;
if (tp == &vnodeGlobal.tp[0]) {
setThreadName("vnode-commit");
} else if (tp == &vnodeGlobal.tp[1]) {
setThreadName("vnode-merge");
}
for (;;) {
taosThreadMutexLock(&(tp->mutex));
for (;;) {
pTask = tp->queue.next;
if (pTask == &tp->queue) {
// no task
if (vnodeGlobal.stop) {
taosThreadMutexUnlock(&(tp->mutex));
return NULL;
} else {
taosThreadCondWait(&(tp->hasTask), &(tp->mutex));
}
} else {
// has task
pTask->prev->next = pTask->next;
pTask->next->prev = pTask->prev;
break;
}
}
taosThreadMutexUnlock(&(tp->mutex));
pTask->execute(pTask->arg);
taosMemoryFree(pTask);
}
return NULL;
}

View File

@ -129,8 +129,8 @@ int32_t vnodeAlterReplica(const char *path, SAlterVnodeReplicaReq *pReq, int32_t
} }
pCfg->changeVersion = pReq->changeVersion; pCfg->changeVersion = pReq->changeVersion;
vInfo("vgId:%d, save config while alter, replicas:%d totalReplicas:%d selfIndex:%d changeVersion:%d", vInfo("vgId:%d, save config while alter, replicas:%d totalReplicas:%d selfIndex:%d changeVersion:%d", pReq->vgId,
pReq->vgId, pCfg->replicaNum, pCfg->totalReplicaNum, pCfg->myIndex, pCfg->changeVersion); pCfg->replicaNum, pCfg->totalReplicaNum, pCfg->myIndex, pCfg->changeVersion);
info.config.syncCfg = *pCfg; info.config.syncCfg = *pCfg;
ret = vnodeSaveInfo(dir, &info); ret = vnodeSaveInfo(dir, &info);
@ -396,10 +396,14 @@ SVnode *vnodeOpen(const char *path, int32_t diskPrimary, STfs *pTfs, SMsgCb msgC
pVnode->blocked = false; pVnode->blocked = false;
tsem_init(&pVnode->syncSem, 0, 0); tsem_init(&pVnode->syncSem, 0, 0);
tsem_init(&(pVnode->canCommit), 0, 1);
taosThreadMutexInit(&pVnode->mutex, NULL); taosThreadMutexInit(&pVnode->mutex, NULL);
taosThreadCondInit(&pVnode->poolNotEmpty, NULL); taosThreadCondInit(&pVnode->poolNotEmpty, NULL);
if (vnodeAChannelInit(vnodeAsyncHandle[0], &pVnode->commitChannel) != 0) {
vError("vgId:%d, failed to init commit channel", TD_VID(pVnode));
goto _err;
}
int8_t rollback = vnodeShouldRollback(pVnode); int8_t rollback = vnodeShouldRollback(pVnode);
// open buffer pool // open buffer pool
@ -487,7 +491,6 @@ _err:
if (pVnode->pMeta) metaClose(&pVnode->pMeta); if (pVnode->pMeta) metaClose(&pVnode->pMeta);
if (pVnode->freeList) vnodeCloseBufPool(pVnode); if (pVnode->freeList) vnodeCloseBufPool(pVnode);
tsem_destroy(&(pVnode->canCommit));
taosMemoryFree(pVnode); taosMemoryFree(pVnode);
return NULL; return NULL;
} }
@ -501,7 +504,8 @@ void vnodePostClose(SVnode *pVnode) { vnodeSyncPostClose(pVnode); }
void vnodeClose(SVnode *pVnode) { void vnodeClose(SVnode *pVnode) {
if (pVnode) { if (pVnode) {
tsem_wait(&pVnode->canCommit); vnodeAWait(vnodeAsyncHandle[0], pVnode->commitTask);
vnodeAChannelDestroy(vnodeAsyncHandle[0], pVnode->commitChannel, true);
vnodeSyncClose(pVnode); vnodeSyncClose(pVnode);
vnodeQueryClose(pVnode); vnodeQueryClose(pVnode);
tqClose(pVnode->pTq); tqClose(pVnode->pTq);
@ -510,10 +514,8 @@ void vnodeClose(SVnode *pVnode) {
smaClose(pVnode->pSma); smaClose(pVnode->pSma);
if (pVnode->pMeta) metaClose(&pVnode->pMeta); if (pVnode->pMeta) metaClose(&pVnode->pMeta);
vnodeCloseBufPool(pVnode); vnodeCloseBufPool(pVnode);
tsem_post(&pVnode->canCommit);
// destroy handle // destroy handle
tsem_destroy(&(pVnode->canCommit));
tsem_destroy(&pVnode->syncSem); tsem_destroy(&pVnode->syncSem);
taosThreadCondDestroy(&pVnode->poolNotEmpty); taosThreadCondDestroy(&pVnode->poolNotEmpty);
taosThreadMutexDestroy(&pVnode->mutex); taosThreadMutexDestroy(&pVnode->mutex);

View File

@ -13,8 +13,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/ */
#include "vnd.h"
#include "tsdb.h" #include "tsdb.h"
#include "vnd.h"
// SVSnapReader ======================================================== // SVSnapReader ========================================================
struct SVSnapReader { struct SVSnapReader {
@ -32,11 +32,11 @@ struct SVSnapReader {
TSnapRangeArray *pRanges; TSnapRangeArray *pRanges;
STsdbSnapReader *pTsdbReader; STsdbSnapReader *pTsdbReader;
// tq // tq
int8_t tqHandleDone; int8_t tqHandleDone;
STqSnapReader *pTqSnapReader; STqSnapReader *pTqSnapReader;
int8_t tqOffsetDone; int8_t tqOffsetDone;
STqOffsetReader *pTqOffsetReader; STqOffsetReader *pTqOffsetReader;
int8_t tqCheckInfoDone; int8_t tqCheckInfoDone;
STqCheckInfoReader *pTqCheckInfoReader; STqCheckInfoReader *pTqCheckInfoReader;
// stream // stream
int8_t streamTaskDone; int8_t streamTaskDone;
@ -458,8 +458,8 @@ struct SVSnapWriter {
TSnapRangeArray *pRanges; TSnapRangeArray *pRanges;
STsdbSnapWriter *pTsdbSnapWriter; STsdbSnapWriter *pTsdbSnapWriter;
// tq // tq
STqSnapWriter *pTqSnapWriter; STqSnapWriter *pTqSnapWriter;
STqOffsetWriter *pTqOffsetWriter; STqOffsetWriter *pTqOffsetWriter;
STqCheckInfoWriter *pTqCheckInfoWriter; STqCheckInfoWriter *pTqCheckInfoWriter;
// stream // stream
SStreamTaskWriter *pStreamTaskWriter; SStreamTaskWriter *pStreamTaskWriter;
@ -519,6 +519,8 @@ _out:
return code; return code;
} }
extern int32_t tsdbCancelAllBgTask(STsdb *tsdb);
int32_t vnodeSnapWriterOpen(SVnode *pVnode, SSnapshotParam *pParam, SVSnapWriter **ppWriter) { int32_t vnodeSnapWriterOpen(SVnode *pVnode, SSnapshotParam *pParam, SVSnapWriter **ppWriter) {
int32_t code = 0; int32_t code = 0;
SVSnapWriter *pWriter = NULL; SVSnapWriter *pWriter = NULL;
@ -526,8 +528,8 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, SSnapshotParam *pParam, SVSnapWriter
int64_t ever = pParam->end; int64_t ever = pParam->end;
// commit memory data // commit memory data
vnodeAsyncCommit(pVnode); vnodeSyncCommit(pVnode);
tsem_wait(&pVnode->canCommit); tsdbCancelAllBgTask(pVnode->pTsdb);
// alloc // alloc
pWriter = (SVSnapWriter *)taosMemoryCalloc(1, sizeof(*pWriter)); pWriter = (SVSnapWriter *)taosMemoryCalloc(1, sizeof(*pWriter));
@ -657,7 +659,6 @@ _exit:
vInfo("vgId:%d, vnode snapshot writer closed, rollback:%d", TD_VID(pVnode), rollback); vInfo("vgId:%d, vnode snapshot writer closed, rollback:%d", TD_VID(pVnode), rollback);
taosMemoryFree(pWriter); taosMemoryFree(pWriter);
} }
tsem_post(&pVnode->canCommit);
return code; return code;
} }

View File

@ -610,6 +610,13 @@ void streamEventReloadState(SOperatorInfo* pOperator) {
compactEventWindow(pOperator, &curInfo, pInfo->pSeUpdated, pInfo->pSeDeleted, false); compactEventWindow(pOperator, &curInfo, pInfo->pSeUpdated, pInfo->pSeDeleted, false);
qDebug("===stream=== reload state. save result %" PRId64 ", %" PRIu64, curInfo.winInfo.sessionWin.win.skey, qDebug("===stream=== reload state. save result %" PRId64 ", %" PRIu64, curInfo.winInfo.sessionWin.win.skey,
curInfo.winInfo.sessionWin.groupId); curInfo.winInfo.sessionWin.groupId);
if (IS_VALID_SESSION_WIN(curInfo.winInfo)) {
saveSessionOutputBuf(pAggSup, &curInfo.winInfo);
}
if (!curInfo.pWinFlag->endFlag) {
continue;
}
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
saveResult(curInfo.winInfo, pInfo->pSeUpdated); saveResult(curInfo.winInfo, pInfo->pSeUpdated);
@ -621,10 +628,6 @@ void streamEventReloadState(SOperatorInfo* pOperator) {
getSessionHashKey(&curInfo.winInfo.sessionWin, &key); getSessionHashKey(&curInfo.winInfo.sessionWin, &key);
tSimpleHashPut(pAggSup->pResultRows, &key, sizeof(SSessionKey), &curInfo.winInfo, sizeof(SResultWindowInfo)); tSimpleHashPut(pAggSup->pResultRows, &key, sizeof(SSessionKey), &curInfo.winInfo, sizeof(SResultWindowInfo));
} }
if (IS_VALID_SESSION_WIN(curInfo.winInfo)) {
saveSessionOutputBuf(pAggSup, &curInfo.winInfo);
}
} }
taosMemoryFree(pBuf); taosMemoryFree(pBuf);

View File

@ -276,15 +276,15 @@ static int32_t getAllIntervalWindow(SSHashObj* pHashMap, SSHashObj* resWins) {
void* pIte = NULL; void* pIte = NULL;
int32_t iter = 0; int32_t iter = 0;
while ((pIte = tSimpleHashIterate(pHashMap, pIte, &iter)) != NULL) { while ((pIte = tSimpleHashIterate(pHashMap, pIte, &iter)) != NULL) {
SWinKey* pKey = tSimpleHashGetKey(pIte, NULL); SWinKey* pKey = tSimpleHashGetKey(pIte, NULL);
uint64_t groupId = pKey->groupId; uint64_t groupId = pKey->groupId;
TSKEY ts = pKey->ts; TSKEY ts = pKey->ts;
SRowBuffPos* pPos = *(SRowBuffPos**)pIte; SRowBuffPos* pPos = *(SRowBuffPos**)pIte;
if (!pPos->beUpdated) { if (!pPos->beUpdated) {
continue; continue;
} }
pPos->beUpdated = false; pPos->beUpdated = false;
int32_t code = saveWinResultInfo(ts, groupId, pPos, resWins); int32_t code = saveWinResultInfo(ts, groupId, pPos, resWins);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
return code; return code;
} }
@ -1091,10 +1091,10 @@ void doStreamIntervalDecodeOpState(void* buf, int32_t len, SOperatorInfo* pOpera
int32_t mapSize = 0; int32_t mapSize = 0;
buf = taosDecodeFixedI32(buf, &mapSize); buf = taosDecodeFixedI32(buf, &mapSize);
for (int32_t i = 0; i < mapSize; i++) { for (int32_t i = 0; i < mapSize; i++) {
SWinKey key = {0}; SWinKey key = {0};
buf = decodeSWinKey(buf, &key); buf = decodeSWinKey(buf, &key);
SRowBuffPos* pPos = NULL; SRowBuffPos* pPos = NULL;
int32_t resSize = pInfo->aggSup.resultRowSize; int32_t resSize = pInfo->aggSup.resultRowSize;
pInfo->stateStore.streamStateAddIfNotExist(pInfo->pState, &key, (void**)&pPos, &resSize); pInfo->stateStore.streamStateAddIfNotExist(pInfo->pState, &key, (void**)&pPos, &resSize);
tSimpleHashPut(pInfo->aggSup.pResultRowHashTable, &key, sizeof(SWinKey), &pPos, POINTER_BYTES); tSimpleHashPut(pInfo->aggSup.pResultRowHashTable, &key, sizeof(SWinKey), &pPos, POINTER_BYTES);
} }
@ -1165,7 +1165,7 @@ static SSDataBlock* buildIntervalResult(SOperatorInfo* pOperator) {
return NULL; return NULL;
} }
int32_t copyUpdateResult(SSHashObj** ppWinUpdated, SArray* pUpdated, __compar_fn_t compar) { int32_t copyUpdateResult(SSHashObj** ppWinUpdated, SArray* pUpdated, __compar_fn_t compar) {
void* pIte = NULL; void* pIte = NULL;
int32_t iter = 0; int32_t iter = 0;
while ((pIte = tSimpleHashIterate(*ppWinUpdated, pIte, &iter)) != NULL) { while ((pIte = tSimpleHashIterate(*ppWinUpdated, pIte, &iter)) != NULL) {
@ -1402,10 +1402,12 @@ void streamIntervalReloadState(SOperatorInfo* pOperator) {
void* pBuf = NULL; void* pBuf = NULL;
int32_t code = pInfo->stateStore.streamStateGetInfo(pInfo->pState, STREAM_INTERVAL_OP_STATE_NAME, int32_t code = pInfo->stateStore.streamStateGetInfo(pInfo->pState, STREAM_INTERVAL_OP_STATE_NAME,
strlen(STREAM_INTERVAL_OP_STATE_NAME), &pBuf, &size); strlen(STREAM_INTERVAL_OP_STATE_NAME), &pBuf, &size);
TSKEY ts = *(TSKEY*)pBuf; if (code == 0) {
taosMemoryFree(pBuf); TSKEY ts = *(TSKEY*)pBuf;
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, ts); taosMemoryFree(pBuf);
pInfo->stateStore.streamStateReloadInfo(pInfo->pState, ts); pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, ts);
pInfo->stateStore.streamStateReloadInfo(pInfo->pState, ts);
}
} }
SOperatorInfo* downstream = pOperator->pDownstream[0]; SOperatorInfo* downstream = pOperator->pDownstream[0];
if (downstream->fpSet.reloadStreamStateFn) { if (downstream->fpSet.reloadStreamStateFn) {
@ -1723,8 +1725,8 @@ void setSessionOutputBuf(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endT
pCurWin->sessionWin.win.skey = startTs; pCurWin->sessionWin.win.skey = startTs;
pCurWin->sessionWin.win.ekey = endTs; pCurWin->sessionWin.win.ekey = endTs;
int32_t size = pAggSup->resultRowSize; int32_t size = pAggSup->resultRowSize;
int32_t code = pAggSup->stateStore.streamStateSessionAddIfNotExist(pAggSup->pState, &pCurWin->sessionWin, int32_t code = pAggSup->stateStore.streamStateSessionAddIfNotExist(pAggSup->pState, &pCurWin->sessionWin,
pAggSup->gap, (void**)&pCurWin->pStatePos, &size); pAggSup->gap, (void**)&pCurWin->pStatePos, &size);
if (code == TSDB_CODE_SUCCESS && !inWinRange(&pAggSup->winRange, &pCurWin->sessionWin.win)) { if (code == TSDB_CODE_SUCCESS && !inWinRange(&pAggSup->winRange, &pCurWin->sessionWin.win)) {
code = TSDB_CODE_FAILED; code = TSDB_CODE_FAILED;
clearOutputBuf(pAggSup->pState, pCurWin->pStatePos, &pAggSup->pSessionAPI->stateStore); clearOutputBuf(pAggSup->pState, pCurWin->pStatePos, &pAggSup->pSessionAPI->stateStore);
@ -1822,9 +1824,9 @@ void removeSessionResults(SStreamAggSupporter* pAggSup, SSHashObj* pHashMap, SAr
} }
} }
int32_t updateSessionWindowInfo(SStreamAggSupporter* pAggSup, SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t groupId, int32_t updateSessionWindowInfo(SStreamAggSupporter* pAggSup, SResultWindowInfo* pWinInfo, TSKEY* pStartTs,
int32_t rows, int32_t start, int64_t gap, SSHashObj* pResultRows, SSHashObj* pStUpdated, TSKEY* pEndTs, uint64_t groupId, int32_t rows, int32_t start, int64_t gap,
SSHashObj* pStDeleted) { SSHashObj* pResultRows, SSHashObj* pStUpdated, SSHashObj* pStDeleted) {
for (int32_t i = start; i < rows; ++i) { for (int32_t i = start; i < rows; ++i) {
if (!isInWindow(pWinInfo, pStartTs[i], gap) && (!pEndTs || !isInWindow(pWinInfo, pEndTs[i], gap))) { if (!isInWindow(pWinInfo, pStartTs[i], gap) && (!pEndTs || !isInWindow(pWinInfo, pEndTs[i], gap))) {
return i - start; return i - start;
@ -1856,8 +1858,8 @@ static int32_t initSessionOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pR
} }
int32_t doOneWindowAggImpl(SColumnInfoData* pTimeWindowData, SResultWindowInfo* pCurWin, SResultRow** pResult, int32_t doOneWindowAggImpl(SColumnInfoData* pTimeWindowData, SResultWindowInfo* pCurWin, SResultRow** pResult,
int32_t startIndex, int32_t winRows, int32_t rows, int32_t numOutput, int32_t startIndex, int32_t winRows, int32_t rows, int32_t numOutput,
SOperatorInfo* pOperator, int64_t winDelta) { SOperatorInfo* pOperator, int64_t winDelta) {
SExprSupp* pSup = &pOperator->exprSupp; SExprSupp* pSup = &pOperator->exprSupp;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
int32_t code = initSessionOutputBuf(pCurWin, pResult, pSup->pCtx, numOutput, pSup->rowEntryInfoOffset); int32_t code = initSessionOutputBuf(pCurWin, pResult, pSup->pCtx, numOutput, pSup->rowEntryInfoOffset);
@ -1981,9 +1983,10 @@ static void compactSessionSemiWindow(SOperatorInfo* pOperator, SResultWindowInfo
} }
int32_t saveSessionOutputBuf(SStreamAggSupporter* pAggSup, SResultWindowInfo* pWinInfo) { int32_t saveSessionOutputBuf(SStreamAggSupporter* pAggSup, SResultWindowInfo* pWinInfo) {
qDebug("===stream===try save session result skey:%" PRId64 ", ekey:%" PRId64 ".pos%d", qDebug("===stream===try save session result skey:%" PRId64 ", ekey:%" PRId64 ".pos%d", pWinInfo->sessionWin.win.skey,
pWinInfo->sessionWin.win.skey, pWinInfo->sessionWin.win.ekey, pWinInfo->pStatePos->needFree); pWinInfo->sessionWin.win.ekey, pWinInfo->pStatePos->needFree);
return pAggSup->stateStore.streamStateSessionPut(pAggSup->pState, &pWinInfo->sessionWin, pWinInfo->pStatePos, pAggSup->resultRowSize); return pAggSup->stateStore.streamStateSessionPut(pAggSup->pState, &pWinInfo->sessionWin, pWinInfo->pStatePos,
pAggSup->resultRowSize);
} }
static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBlock, SSHashObj* pStUpdated, static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBlock, SSHashObj* pStUpdated,
@ -2045,7 +2048,8 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pStUpdated) { if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pStUpdated) {
code = saveResult(winInfo, pStUpdated); code = saveResult(winInfo, pStUpdated);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
qError("%s do stream session aggregate impl, set result error, code %s", GET_TASKID(pTaskInfo), tstrerror(code)); qError("%s do stream session aggregate impl, set result error, code %s", GET_TASKID(pTaskInfo),
tstrerror(code));
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY); T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
} }
} }
@ -2084,8 +2088,8 @@ void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SArr
inline int32_t sessionKeyCompareAsc(const void* pKey1, const void* pKey2) { inline int32_t sessionKeyCompareAsc(const void* pKey1, const void* pKey2) {
SResultWindowInfo* pWinInfo1 = (SResultWindowInfo*)pKey1; SResultWindowInfo* pWinInfo1 = (SResultWindowInfo*)pKey1;
SResultWindowInfo* pWinInfo2 = (SResultWindowInfo*)pKey2; SResultWindowInfo* pWinInfo2 = (SResultWindowInfo*)pKey2;
SSessionKey* pWin1 = &pWinInfo1->sessionWin; SSessionKey* pWin1 = &pWinInfo1->sessionWin;
SSessionKey* pWin2 = &pWinInfo2->sessionWin; SSessionKey* pWin2 = &pWinInfo2->sessionWin;
if (pWin1->groupId > pWin2->groupId) { if (pWin1->groupId > pWin2->groupId) {
return 1; return 1;
@ -2290,9 +2294,9 @@ int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, void* pState, SSDa
for (int32_t i = pGroupResInfo->index; i < numOfRows; i += 1) { for (int32_t i = pGroupResInfo->index; i < numOfRows; i += 1) {
SResultWindowInfo* pWinInfo = taosArrayGet(pGroupResInfo->pRows, i); SResultWindowInfo* pWinInfo = taosArrayGet(pGroupResInfo->pRows, i);
SRowBuffPos* pPos = pWinInfo->pStatePos; SRowBuffPos* pPos = pWinInfo->pStatePos;
SResultRow* pRow = NULL; SResultRow* pRow = NULL;
SSessionKey* pKey = (SSessionKey*) pPos->pKey; SSessionKey* pKey = (SSessionKey*)pPos->pKey;
if (pBlock->info.id.groupId == 0) { if (pBlock->info.id.groupId == 0) {
pBlock->info.id.groupId = pKey->groupId; pBlock->info.id.groupId = pKey->groupId;
@ -2312,7 +2316,7 @@ int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, void* pState, SSDa
} }
} }
int32_t code = pAPI->stateStore.streamStateGetByPos(pState, pPos, (void**)&pRow); int32_t code = pAPI->stateStore.streamStateGetByPos(pState, pPos, (void**)&pRow);
if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) { if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
ASSERT(pBlock->info.rows > 0); ASSERT(pBlock->info.rows > 0);
break; break;
@ -2325,7 +2329,7 @@ int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, void* pState, SSDa
pGroupResInfo->index += 1; pGroupResInfo->index += 1;
continue; continue;
} }
doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowEntryOffset); doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowEntryOffset);
// no results, continue to check the next one // no results, continue to check the next one
if (pRow->numOfRows == 0) { if (pRow->numOfRows == 0) {
@ -2409,7 +2413,7 @@ void getMaxTsWins(const SArray* pAllWins, SArray* pMaxWins) {
return; return;
} }
SResultWindowInfo* pWinInfo = taosArrayGet(pAllWins, size - 1); SResultWindowInfo* pWinInfo = taosArrayGet(pAllWins, size - 1);
SSessionKey* pSeKey = pWinInfo->pStatePos->pKey; SSessionKey* pSeKey = pWinInfo->pStatePos->pKey;
taosArrayPush(pMaxWins, pSeKey); taosArrayPush(pMaxWins, pSeKey);
if (pSeKey->groupId == 0) { if (pSeKey->groupId == 0) {
return; return;
@ -2716,7 +2720,8 @@ void resetWinRange(STimeWindow* winRange) {
void getSessionWindowInfoByKey(SStreamAggSupporter* pAggSup, SSessionKey* pKey, SResultWindowInfo* pWinInfo) { void getSessionWindowInfoByKey(SStreamAggSupporter* pAggSup, SSessionKey* pKey, SResultWindowInfo* pWinInfo) {
int32_t rowSize = pAggSup->resultRowSize; int32_t rowSize = pAggSup->resultRowSize;
int32_t code = pAggSup->stateStore.streamStateSessionGet(pAggSup->pState, pKey, (void**)&pWinInfo->pStatePos, &rowSize); int32_t code =
pAggSup->stateStore.streamStateSessionGet(pAggSup->pState, pKey, (void**)&pWinInfo->pStatePos, &rowSize);
if (code == TSDB_CODE_SUCCESS) { if (code == TSDB_CODE_SUCCESS) {
pWinInfo->sessionWin = *pKey; pWinInfo->sessionWin = *pKey;
pWinInfo->isOutput = true; pWinInfo->isOutput = true;
@ -2730,16 +2735,16 @@ void getSessionWindowInfoByKey(SStreamAggSupporter* pAggSup, SSessionKey* pKey,
void streamSessionSemiReloadState(SOperatorInfo* pOperator) { void streamSessionSemiReloadState(SOperatorInfo* pOperator) {
SStreamSessionAggOperatorInfo* pInfo = pOperator->info; SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
resetWinRange(&pAggSup->winRange); resetWinRange(&pAggSup->winRange);
SResultWindowInfo winInfo = {0}; SResultWindowInfo winInfo = {0};
int32_t size = 0; int32_t size = 0;
void* pBuf = NULL; void* pBuf = NULL;
int32_t code = pAggSup->stateStore.streamStateGetInfo(pAggSup->pState, STREAM_SESSION_OP_STATE_NAME, int32_t code = pAggSup->stateStore.streamStateGetInfo(pAggSup->pState, STREAM_SESSION_OP_STATE_NAME,
strlen(STREAM_SESSION_OP_STATE_NAME), &pBuf, &size); strlen(STREAM_SESSION_OP_STATE_NAME), &pBuf, &size);
int32_t num = (size - sizeof(TSKEY)) / sizeof(SSessionKey); int32_t num = (size - sizeof(TSKEY)) / sizeof(SSessionKey);
SSessionKey* pSeKeyBuf = (SSessionKey*) pBuf; SSessionKey* pSeKeyBuf = (SSessionKey*)pBuf;
ASSERT(size == num * sizeof(SSessionKey) + sizeof(TSKEY)); ASSERT(size == num * sizeof(SSessionKey) + sizeof(TSKEY));
for (int32_t i = 0; i < num; i++) { for (int32_t i = 0; i < num; i++) {
SResultWindowInfo winInfo = {0}; SResultWindowInfo winInfo = {0};
@ -2763,12 +2768,12 @@ void streamSessionReloadState(SOperatorInfo* pOperator) {
SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
resetWinRange(&pAggSup->winRange); resetWinRange(&pAggSup->winRange);
int32_t size = 0; int32_t size = 0;
void* pBuf = NULL; void* pBuf = NULL;
int32_t code = pAggSup->stateStore.streamStateGetInfo(pAggSup->pState, STREAM_SESSION_OP_STATE_NAME, int32_t code = pAggSup->stateStore.streamStateGetInfo(pAggSup->pState, STREAM_SESSION_OP_STATE_NAME,
strlen(STREAM_SESSION_OP_STATE_NAME), &pBuf, &size); strlen(STREAM_SESSION_OP_STATE_NAME), &pBuf, &size);
int32_t num = (size - sizeof(TSKEY)) / sizeof(SSessionKey); int32_t num = (size - sizeof(TSKEY)) / sizeof(SSessionKey);
SSessionKey* pSeKeyBuf = (SSessionKey*)pBuf; SSessionKey* pSeKeyBuf = (SSessionKey*)pBuf;
ASSERT(size == num * sizeof(SSessionKey) + sizeof(TSKEY)); ASSERT(size == num * sizeof(SSessionKey) + sizeof(TSKEY));
TSKEY ts = *(TSKEY*)((char*)pBuf + size - sizeof(TSKEY)); TSKEY ts = *(TSKEY*)((char*)pBuf + size - sizeof(TSKEY));
@ -2887,7 +2892,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
pInfo->recvGetAll = false; pInfo->recvGetAll = false;
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION; pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION;
// for stream // for stream
void* buff = NULL; void* buff = NULL;
int32_t len = 0; int32_t len = 0;
int32_t res = int32_t res =
@ -2924,7 +2929,8 @@ static void clearStreamSessionOperator(SStreamSessionAggOperatorInfo* pInfo) {
pInfo->streamAggSup.stateStore.streamStateSessionClear(pInfo->streamAggSup.pState); pInfo->streamAggSup.stateStore.streamStateSessionClear(pInfo->streamAggSup.pState);
} }
void deleteSessionWinState(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SSHashObj* pMapUpdate, SSHashObj* pMapDelete) { void deleteSessionWinState(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SSHashObj* pMapUpdate,
SSHashObj* pMapDelete) {
SArray* pWins = taosArrayInit(16, sizeof(SSessionKey)); SArray* pWins = taosArrayInit(16, sizeof(SSessionKey));
doDeleteTimeWindows(pAggSup, pBlock, pWins); doDeleteTimeWindows(pAggSup, pBlock, pWins);
removeSessionResults(pAggSup, pMapUpdate, pWins); removeSessionResults(pAggSup, pMapUpdate, pWins);
@ -3023,7 +3029,7 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
copyUpdateResult(&pInfo->pStUpdated, pInfo->pUpdated, sessionKeyCompareAsc); copyUpdateResult(&pInfo->pStUpdated, pInfo->pUpdated, sessionKeyCompareAsc);
removeSessionDeleteResults(pInfo->pStDeleted, pInfo->pUpdated); removeSessionDeleteResults(pInfo->pStDeleted, pInfo->pUpdated);
if(pInfo->isHistoryOp) { if (pInfo->isHistoryOp) {
getMaxTsWins(pInfo->pUpdated, pInfo->historyWins); getMaxTsWins(pInfo->pUpdated, pInfo->historyWins);
} }
@ -3057,8 +3063,9 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream
pOperator->operatorType = pPhyNode->type; pOperator->operatorType = pPhyNode->type;
if (pPhyNode->type != QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) { if (pPhyNode->type != QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION) {
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamSessionSemiAgg, NULL, pOperator->fpSet =
destroyStreamSessionAggOperatorInfo, optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); createOperatorFpSet(optrDummyOpenFn, doStreamSessionSemiAgg, NULL, destroyStreamSessionAggOperatorInfo,
optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL);
setOperatorStreamStateFn(pOperator, streamSessionReleaseState, streamSessionSemiReloadState); setOperatorStreamStateFn(pOperator, streamSessionReleaseState, streamSessionSemiReloadState);
} }
setOperatorInfo(pOperator, getStreamOpName(pOperator->operatorType), pPhyNode->type, false, OP_NOT_OPENED, pInfo, setOperatorInfo(pOperator, getStreamOpName(pOperator->operatorType), pPhyNode->type, false, OP_NOT_OPENED, pInfo,
@ -3174,7 +3181,7 @@ void getStateWindowInfoByKey(SStreamAggSupporter* pAggSup, SSessionKey* pKey, SS
pAggSup->stateStore.streamStateSessionSeekKeyNext(pAggSup->pState, &pNextWin->winInfo.sessionWin); pAggSup->stateStore.streamStateSessionSeekKeyNext(pAggSup->pState, &pNextWin->winInfo.sessionWin);
int32_t nextSize = pAggSup->resultRowSize; int32_t nextSize = pAggSup->resultRowSize;
int32_t code = pAggSup->stateStore.streamStateSessionGetKVByCur(pCur, &pNextWin->winInfo.sessionWin, int32_t code = pAggSup->stateStore.streamStateSessionGetKVByCur(pCur, &pNextWin->winInfo.sessionWin,
(void**)&pNextWin->winInfo.pStatePos, &nextSize); (void**)&pNextWin->winInfo.pStatePos, &nextSize);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
SET_SESSION_WIN_INVALID(pNextWin->winInfo); SET_SESSION_WIN_INVALID(pNextWin->winInfo);
} else { } else {
@ -3187,8 +3194,8 @@ void getStateWindowInfoByKey(SStreamAggSupporter* pAggSup, SSessionKey* pKey, SS
pNextWin->winInfo.isOutput = true; pNextWin->winInfo.isOutput = true;
} }
pAggSup->stateStore.streamStateFreeCur(pCur); pAggSup->stateStore.streamStateFreeCur(pCur);
qDebug("===stream===get state next win buff. skey:%" PRId64 ", endkey:%" PRId64, pNextWin->winInfo.sessionWin.win.skey, qDebug("===stream===get state next win buff. skey:%" PRId64 ", endkey:%" PRId64,
pNextWin->winInfo.sessionWin.win.ekey); pNextWin->winInfo.sessionWin.win.skey, pNextWin->winInfo.sessionWin.win.ekey);
} }
void setStateOutputBuf(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_t groupId, char* pKeyData, void setStateOutputBuf(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_t groupId, char* pKeyData,
@ -3257,13 +3264,13 @@ void setStateOutputBuf(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_t groupId,
pNextWin->winInfo.isOutput = true; pNextWin->winInfo.isOutput = true;
} }
pAggSup->stateStore.streamStateFreeCur(pCur); pAggSup->stateStore.streamStateFreeCur(pCur);
qDebug("===stream===set state next win buff. skey:%" PRId64 ", endkey:%" PRId64, pNextWin->winInfo.sessionWin.win.skey, qDebug("===stream===set state next win buff. skey:%" PRId64 ", endkey:%" PRId64,
pNextWin->winInfo.sessionWin.win.ekey); pNextWin->winInfo.sessionWin.win.skey, pNextWin->winInfo.sessionWin.win.ekey);
} }
int32_t updateStateWindowInfo(SStreamAggSupporter* pAggSup, SStateWindowInfo* pWinInfo, SStateWindowInfo* pNextWin, TSKEY* pTs, uint64_t groupId, int32_t updateStateWindowInfo(SStreamAggSupporter* pAggSup, SStateWindowInfo* pWinInfo, SStateWindowInfo* pNextWin,
SColumnInfoData* pKeyCol, int32_t rows, int32_t start, bool* allEqual, TSKEY* pTs, uint64_t groupId, SColumnInfoData* pKeyCol, int32_t rows, int32_t start,
SSHashObj* pResultRows, SSHashObj* pSeUpdated, SSHashObj* pSeDeleted) { bool* allEqual, SSHashObj* pResultRows, SSHashObj* pSeUpdated, SSHashObj* pSeDeleted) {
*allEqual = true; *allEqual = true;
for (int32_t i = start; i < rows; ++i) { for (int32_t i = start; i < rows; ++i) {
char* pKeyData = colDataGetData(pKeyCol, i); char* pKeyData = colDataGetData(pKeyCol, i);
@ -3338,7 +3345,7 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
SStateWindowInfo nextWin = {0}; SStateWindowInfo nextWin = {0};
setStateOutputBuf(pAggSup, tsCols[i], groupId, pKeyData, &curWin, &nextWin); setStateOutputBuf(pAggSup, tsCols[i], groupId, pKeyData, &curWin, &nextWin);
releaseOutputBuf(pAggSup->pState, nextWin.winInfo.pStatePos, &pAPI->stateStore); releaseOutputBuf(pAggSup->pState, nextWin.winInfo.pStatePos, &pAPI->stateStore);
setSessionWinOutputInfo(pSeUpdated, &curWin.winInfo); setSessionWinOutputInfo(pSeUpdated, &curWin.winInfo);
winRows = updateStateWindowInfo(pAggSup, &curWin, &nextWin, tsCols, groupId, pKeyColInfo, rows, i, &allEqual, winRows = updateStateWindowInfo(pAggSup, &curWin, &nextWin, tsCols, groupId, pKeyColInfo, rows, i, &allEqual,
pAggSup->pResultRows, pSeUpdated, pStDeleted); pAggSup->pResultRows, pSeUpdated, pStDeleted);
@ -3475,6 +3482,7 @@ void doStreamStateSaveCheckpoint(SOperatorInfo* pOperator) {
len = doStreamStateEncodeOpState(&pBuf, len, pOperator, true); len = doStreamStateEncodeOpState(&pBuf, len, pOperator, true);
pInfo->streamAggSup.stateStore.streamStateSaveInfo(pInfo->streamAggSup.pState, STREAM_STATE_OP_CHECKPOINT_NAME, pInfo->streamAggSup.stateStore.streamStateSaveInfo(pInfo->streamAggSup.pState, STREAM_STATE_OP_CHECKPOINT_NAME,
strlen(STREAM_STATE_OP_CHECKPOINT_NAME), buf, len); strlen(STREAM_STATE_OP_CHECKPOINT_NAME), buf, len);
taosMemoryFree(buf);
} }
static SSDataBlock* buildStateResult(SOperatorInfo* pOperator) { static SSDataBlock* buildStateResult(SOperatorInfo* pOperator) {
@ -3614,10 +3622,11 @@ void streamStateReleaseState(SOperatorInfo* pOperator) {
static void compactStateWindow(SOperatorInfo* pOperator, SResultWindowInfo* pCurWin, SResultWindowInfo* pNextWin, static void compactStateWindow(SOperatorInfo* pOperator, SResultWindowInfo* pCurWin, SResultWindowInfo* pNextWin,
SSHashObj* pStUpdated, SSHashObj* pStDeleted) { SSHashObj* pStUpdated, SSHashObj* pStDeleted) {
SExprSupp* pSup = &pOperator->exprSupp; SExprSupp* pSup = &pOperator->exprSupp;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStreamStateAggOperatorInfo* pInfo = pOperator->info; SStreamStateAggOperatorInfo* pInfo = pOperator->info;
compactTimeWindow(pSup, &pInfo->streamAggSup, &pInfo->twAggSup, pTaskInfo, pCurWin, pNextWin, pStUpdated, pStDeleted, false); compactTimeWindow(pSup, &pInfo->streamAggSup, &pInfo->twAggSup, pTaskInfo, pCurWin, pNextWin, pStUpdated, pStDeleted,
false);
} }
void streamStateReloadState(SOperatorInfo* pOperator) { void streamStateReloadState(SOperatorInfo* pOperator) {
@ -3629,7 +3638,7 @@ void streamStateReloadState(SOperatorInfo* pOperator) {
int32_t size = 0; int32_t size = 0;
void* pBuf = NULL; void* pBuf = NULL;
int32_t code = pAggSup->stateStore.streamStateGetInfo(pAggSup->pState, STREAM_STATE_OP_STATE_NAME, int32_t code = pAggSup->stateStore.streamStateGetInfo(pAggSup->pState, STREAM_STATE_OP_STATE_NAME,
strlen(STREAM_STATE_OP_STATE_NAME), &pBuf, &size); strlen(STREAM_STATE_OP_STATE_NAME), &pBuf, &size);
int32_t num = (size - sizeof(TSKEY)) / sizeof(SSessionKey); int32_t num = (size - sizeof(TSKEY)) / sizeof(SSessionKey);
qDebug("===stream=== reload state. get result count:%d", num); qDebug("===stream=== reload state. get result count:%d", num);
SSessionKey* pSeKeyBuf = (SSessionKey*)pBuf; SSessionKey* pSeKeyBuf = (SSessionKey*)pBuf;
@ -4010,8 +4019,9 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
setOperatorInfo(pOperator, "StreamIntervalOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, true, OP_NOT_OPENED, setOperatorInfo(pOperator, "StreamIntervalOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, true, OP_NOT_OPENED,
pInfo, pTaskInfo); pInfo, pTaskInfo);
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamIntervalAgg, NULL, pOperator->fpSet =
destroyStreamFinalIntervalOperatorInfo, optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL); createOperatorFpSet(optrDummyOpenFn, doStreamIntervalAgg, NULL, destroyStreamFinalIntervalOperatorInfo,
optrDefaultBufFn, NULL, optrDefaultGetNextExtFn, NULL);
setOperatorStreamStateFn(pOperator, streamIntervalReleaseState, streamIntervalReloadState); setOperatorStreamStateFn(pOperator, streamIntervalReleaseState, streamIntervalReloadState);
pInfo->stateStore = pTaskInfo->storageAPI.stateStore; pInfo->stateStore = pTaskInfo->storageAPI.stateStore;

View File

@ -916,6 +916,7 @@ void nodesDestroyNode(SNode* pNode) {
taosHashCleanup(pStmt->pSubTableHashObj); taosHashCleanup(pStmt->pSubTableHashObj);
taosHashCleanup(pStmt->pTableNameHashObj); taosHashCleanup(pStmt->pTableNameHashObj);
taosHashCleanup(pStmt->pDbFNameHashObj); taosHashCleanup(pStmt->pDbFNameHashObj);
taosHashCleanup(pStmt->pTableCxtHashObj);
if (pStmt->freeHashFunc) { if (pStmt->freeHashFunc) {
pStmt->freeHashFunc(pStmt->pTableBlockHashObj); pStmt->freeHashFunc(pStmt->pTableBlockHashObj);
} }

View File

@ -50,7 +50,7 @@ void insCheckTableDataOrder(STableDataCxt *pTableCxt, TSKEY tsKey);
int32_t insGetTableDataCxt(SHashObj *pHash, void *id, int32_t idLen, STableMeta *pTableMeta, int32_t insGetTableDataCxt(SHashObj *pHash, void *id, int32_t idLen, STableMeta *pTableMeta,
SVCreateTbReq **pCreateTbReq, STableDataCxt **pTableCxt, bool colMode, bool ignoreColVals); SVCreateTbReq **pCreateTbReq, STableDataCxt **pTableCxt, bool colMode, bool ignoreColVals);
int32_t initTableColSubmitData(STableDataCxt *pTableCxt); int32_t initTableColSubmitData(STableDataCxt *pTableCxt);
int32_t insMergeTableDataCxt(SHashObj *pTableHash, SArray **pVgDataBlocks); int32_t insMergeTableDataCxt(SHashObj *pTableHash, SArray **pVgDataBlocks, bool isRebuild);
int32_t insBuildVgDataBlocks(SHashObj *pVgroupsHashObj, SArray *pVgDataBlocks, SArray **pDataBlocks); int32_t insBuildVgDataBlocks(SHashObj *pVgroupsHashObj, SArray *pVgDataBlocks, SArray **pDataBlocks);
void insDestroyTableDataCxtHashMap(SHashObj *pTableCxtHash); void insDestroyTableDataCxtHashMap(SHashObj *pTableCxtHash);
void insDestroyVgroupDataCxt(SVgroupDataCxt *pVgCxt); void insDestroyVgroupDataCxt(SVgroupDataCxt *pVgCxt);

View File

@ -425,7 +425,7 @@ SQuery* smlInitHandle() {
int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash) { int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash) {
SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)(handle)->pRoot; SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)(handle)->pRoot;
// merge according to vgId // merge according to vgId
int32_t code = insMergeTableDataCxt(pStmt->pTableBlockHashObj, &pStmt->pVgDataBlocks); int32_t code = insMergeTableDataCxt(pStmt->pTableBlockHashObj, &pStmt->pVgDataBlocks, true);
if (code != TSDB_CODE_SUCCESS) { if (code != TSDB_CODE_SUCCESS) {
uError("insMergeTableDataCxt failed"); uError("insMergeTableDataCxt failed");
return code; return code;

View File

@ -55,6 +55,7 @@ typedef struct SInsertParseContext {
bool usingDuplicateTable; bool usingDuplicateTable;
bool forceUpdate; bool forceUpdate;
bool needTableTagVal; bool needTableTagVal;
bool needRequest; // whether or not request server
} SInsertParseContext; } SInsertParseContext;
typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param); typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param);
@ -652,6 +653,10 @@ static int32_t parseTagValue(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStm
} }
static int32_t buildCreateTbReq(SVnodeModifyOpStmt* pStmt, STag* pTag, SArray* pTagName) { static int32_t buildCreateTbReq(SVnodeModifyOpStmt* pStmt, STag* pTag, SArray* pTagName) {
if (pStmt->pCreateTblReq) {
tdDestroySVCreateTbReq(pStmt->pCreateTblReq);
taosMemoryFreeClear(pStmt->pCreateTblReq);
}
pStmt->pCreateTblReq = taosMemoryCalloc(1, sizeof(SVCreateTbReq)); pStmt->pCreateTblReq = taosMemoryCalloc(1, sizeof(SVCreateTbReq));
if (NULL == pStmt->pCreateTblReq) { if (NULL == pStmt->pCreateTblReq) {
return TSDB_CODE_OUT_OF_MEMORY; return TSDB_CODE_OUT_OF_MEMORY;
@ -1797,9 +1802,10 @@ static void clearStbRowsDataContext(SStbRowsDataContext* pStbRowsCxt) {
taosMemoryFreeClear(pStbRowsCxt->pCreateCtbReq); taosMemoryFreeClear(pStbRowsCxt->pCreateCtbReq);
} }
static int32_t parseOneStbRow(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, const char** ppSql, static int32_t parseOneStbRow(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, const char** ppSql,
SStbRowsDataContext* pStbRowsCxt, bool* pGotRow, SToken* pToken) { SStbRowsDataContext* pStbRowsCxt, bool* pGotRow, SToken* pToken,
bool bFirstTable = false; STableDataCxt** ppTableDataCxt) {
bool bFirstTable = false;
int32_t code = getStbRowValues(pCxt, pStmt, ppSql, pStbRowsCxt, pGotRow, pToken, &bFirstTable); int32_t code = getStbRowValues(pCxt, pStmt, ppSql, pStbRowsCxt, pGotRow, pToken, &bFirstTable);
if (code != TSDB_CODE_SUCCESS || !*pGotRow) { if (code != TSDB_CODE_SUCCESS || !*pGotRow) {
return code; return code;
@ -1809,15 +1815,14 @@ static int32_t parseOneStbRow(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pSt
code = processCtbAutoCreationAndCtbMeta(pCxt, pStmt, pStbRowsCxt); code = processCtbAutoCreationAndCtbMeta(pCxt, pStmt, pStbRowsCxt);
} }
STableDataCxt* pTableDataCxt = NULL;
code = insGetTableDataCxt(pStmt->pTableBlockHashObj, &pStbRowsCxt->pCtbMeta->uid, sizeof(pStbRowsCxt->pCtbMeta->uid), code = insGetTableDataCxt(pStmt->pTableBlockHashObj, &pStbRowsCxt->pCtbMeta->uid, sizeof(pStbRowsCxt->pCtbMeta->uid),
pStbRowsCxt->pCtbMeta, &pStbRowsCxt->pCreateCtbReq, &pTableDataCxt, false, true); pStbRowsCxt->pCtbMeta, &pStbRowsCxt->pCreateCtbReq, ppTableDataCxt, false, true);
initTableColSubmitData(pTableDataCxt); initTableColSubmitData(*ppTableDataCxt);
if (code == TSDB_CODE_SUCCESS) { if (code == TSDB_CODE_SUCCESS) {
SRow** pRow = taosArrayReserve(pTableDataCxt->pData->aRowP, 1); SRow** pRow = taosArrayReserve((*ppTableDataCxt)->pData->aRowP, 1);
code = tRowBuild(pStbRowsCxt->aColVals, pTableDataCxt->pSchema, pRow); code = tRowBuild(pStbRowsCxt->aColVals, (*ppTableDataCxt)->pSchema, pRow);
if (TSDB_CODE_SUCCESS == code) { if (TSDB_CODE_SUCCESS == code) {
insCheckTableDataOrder(pTableDataCxt, TD_ROW_KEY(*pRow)); insCheckTableDataOrder(*ppTableDataCxt, TD_ROW_KEY(*pRow));
} }
} }
@ -1915,7 +1920,8 @@ static int32_t parseValues(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt,
if (!pStmt->stbSyntax) { if (!pStmt->stbSyntax) {
code = parseOneRow(pCxt, &pStmt->pSql, rowsDataCxt.pTableDataCxt, &gotRow, pToken); code = parseOneRow(pCxt, &pStmt->pSql, rowsDataCxt.pTableDataCxt, &gotRow, pToken);
} else { } else {
code = parseOneStbRow(pCxt, pStmt, &pStmt->pSql, rowsDataCxt.pStbRowsCxt, &gotRow, pToken); STableDataCxt* pTableDataCxt = NULL;
code = parseOneStbRow(pCxt, pStmt, &pStmt->pSql, rowsDataCxt.pStbRowsCxt, &gotRow, pToken, &pTableDataCxt);
} }
} }
@ -1979,7 +1985,14 @@ static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt
if (!pStmt->stbSyntax) { if (!pStmt->stbSyntax) {
code = parseOneRow(pCxt, (const char**)&pRow, rowsDataCxt.pTableDataCxt, &gotRow, &token); code = parseOneRow(pCxt, (const char**)&pRow, rowsDataCxt.pTableDataCxt, &gotRow, &token);
} else { } else {
code = parseOneStbRow(pCxt, pStmt, (const char**)&pRow, rowsDataCxt.pStbRowsCxt, &gotRow, &token); STableDataCxt* pTableDataCxt = NULL;
code = parseOneStbRow(pCxt, pStmt, (const char**)&pRow, rowsDataCxt.pStbRowsCxt, &gotRow, &token, &pTableDataCxt);
if (code == TSDB_CODE_SUCCESS) {
SStbRowsDataContext* pStbRowsCxt = rowsDataCxt.pStbRowsCxt;
void* pData = pTableDataCxt;
taosHashPut(pStmt->pTableCxtHashObj, &pStbRowsCxt->pCtbMeta->uid, sizeof(pStbRowsCxt->pCtbMeta->uid), &pData,
POINTER_BYTES);
}
} }
if (code && firstLine) { if (code && firstLine) {
firstLine = false; firstLine = false;
@ -1992,7 +2005,7 @@ static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt
(*pNumOfRows)++; (*pNumOfRows)++;
} }
if (TSDB_CODE_SUCCESS == code && (*pNumOfRows) > tsMaxInsertBatchRows) { if (TSDB_CODE_SUCCESS == code && (*pNumOfRows) >= tsMaxInsertBatchRows) {
pStmt->fileProcessing = true; pStmt->fileProcessing = true;
break; break;
} }
@@ -2003,7 +2016,7 @@ static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt
   parserDebug("0x%" PRIx64 " %d rows have been parsed", pCxt->pComCxt->requestId, *pNumOfRows);
 
-  if (TSDB_CODE_SUCCESS == code && 0 == (*pNumOfRows) &&
+  if (TSDB_CODE_SUCCESS == code && 0 == (*pNumOfRows) && 0 == pStmt->totalRowsNum &&
       (!TSDB_QUERY_HAS_TYPE(pStmt->insertType, TSDB_QUERY_TYPE_STMT_INSERT)) && !pStmt->fileProcessing) {
     code = buildSyntaxErrMsg(&pCxt->msg, "no any data points", NULL);
   }
@@ -2011,6 +2024,12 @@ static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt
 }
 
 static int32_t parseDataFromFileImpl(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, SRowsDataContext rowsDataCxt) {
+  // init only for file
+  if (NULL == pStmt->pTableCxtHashObj) {
+    pStmt->pTableCxtHashObj =
+        taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+  }
   int32_t numOfRows = 0;
   int32_t code = parseCsvFile(pCxt, pStmt, rowsDataCxt, &numOfRows);
   if (TSDB_CODE_SUCCESS == code) {
@@ -2022,7 +2041,18 @@ static int32_t parseDataFromFileImpl(SInsertParseContext* pCxt, SVnodeModifyOpSt
     } else {
       parserDebug("0x%" PRIx64 " insert from csv. File is too large, do it in batches.", pCxt->pComCxt->requestId);
     }
+    if (pStmt->insertType != TSDB_QUERY_TYPE_FILE_INSERT) {
+      return buildSyntaxErrMsg(&pCxt->msg, "keyword VALUES or FILE is exclusive", NULL);
+    }
   }
 
+  // just record pTableCxt whose data come from file
+  if (!pStmt->stbSyntax && numOfRows > 0) {
+    void* pData = rowsDataCxt.pTableDataCxt;
+    taosHashPut(pStmt->pTableCxtHashObj, &pStmt->pTableMeta->uid, sizeof(pStmt->pTableMeta->uid), &pData,
+                POINTER_BYTES);
+  }
+
   return code;
 }
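Note on the bookkeeping added above: parseCsvFile and parseDataFromFileImpl now record, keyed by table uid, each STableDataCxt that actually received rows from the file, so the bottom of the parser can merge only those contexts. Below is a standalone sketch of that per-uid bookkeeping using a toy array map in place of the real SHashObj; all names are hypothetical, not the parser's own.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the uid -> STableDataCxt* map kept in pStmt->pTableCxtHashObj.
 * The parser uses a real hash table; a linear array keeps the sketch short. */
typedef struct {
  uint64_t uid;
  void    *pCxt;
} TableSlot;

typedef struct {
  TableSlot slots[16];
  int       count;
} TableCxtMap;

/* Record (or re-record) the data context for one table; one entry per uid. */
static void record_table_cxt(TableCxtMap *map, uint64_t uid, void *pCxt) {
  for (int i = 0; i < map->count; ++i) {
    if (map->slots[i].uid == uid) {
      map->slots[i].pCxt = pCxt;  /* same table seen again: keep a single entry */
      return;
    }
  }
  map->slots[map->count++] = (TableSlot){.uid = uid, .pCxt = pCxt};
}

int main(void) {
  TableCxtMap map = {0};
  int         cxtA = 0, cxtB = 0;  /* placeholders for real data contexts */
  record_table_cxt(&map, 1001, &cxtA);
  record_table_cxt(&map, 1002, &cxtB);
  record_table_cxt(&map, 1001, &cxtA);      /* duplicate uid does not add a slot */
  printf("tables to merge: %d\n", map.count);  /* prints 2 */
  return 0;
}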
@@ -2061,6 +2091,9 @@ static int32_t parseDataClause(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS
   NEXT_TOKEN(pStmt->pSql, token);
   switch (token.type) {
     case TK_VALUES:
+      if (TSDB_QUERY_HAS_TYPE(pStmt->insertType, TSDB_QUERY_TYPE_FILE_INSERT)) {
+        return buildSyntaxErrMsg(&pCxt->msg, "keyword VALUES or FILE is exclusive", token.z);
+      }
       return parseValuesClause(pCxt, pStmt, rowsDataCxt, &token);
     case TK_FILE:
       return parseFileClause(pCxt, pStmt, rowsDataCxt, &token);
@@ -2275,8 +2308,25 @@ static int32_t parseInsertBodyBottom(SInsertParseContext* pCxt, SVnodeModifyOpSt
     return setStmtInfo(pCxt, pStmt);
   }
 
+  // release old array alloced by merge
+  pStmt->freeArrayFunc(pStmt->pVgDataBlocks);
+  pStmt->pVgDataBlocks = NULL;
+
+  bool fileOnly = (pStmt->insertType == TSDB_QUERY_TYPE_FILE_INSERT);
+  if (fileOnly) {
+    // none data, skip merge & buildvgdata
+    if (0 == taosHashGetSize(pStmt->pTableCxtHashObj)) {
+      pCxt->needRequest = false;
+      return TSDB_CODE_SUCCESS;
+    }
+  }
+
   // merge according to vgId
-  int32_t code = insMergeTableDataCxt(pStmt->pTableBlockHashObj, &pStmt->pVgDataBlocks);
+  int32_t code = insMergeTableDataCxt(fileOnly ? pStmt->pTableCxtHashObj : pStmt->pTableBlockHashObj,
+                                      &pStmt->pVgDataBlocks, pStmt->fileProcessing);
+  // clear tmp hashobj only
+  taosHashClear(pStmt->pTableCxtHashObj);
+
   if (TSDB_CODE_SUCCESS == code) {
     code = insBuildVgDataBlocks(pStmt->pVgroupsHashObj, pStmt->pVgDataBlocks, &pStmt->pDataBlocks);
   }
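Note on the merge path above: for a FILE-only statement the merge now reads from the temporary per-file hash, and an empty hash means the current batch produced no rows at all, so needRequest is cleared and no vgroup blocks are built. Below is a minimal sketch of that guard with illustrative names rather than the parser's types.

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the decision made at the bottom of the insert parser for a
 * FILE-only statement (names are illustrative, not the parser's own). */
typedef struct {
  bool fileOnly;       /* statement used FILE, not VALUES */
  int  pendingTables;  /* entries left in the per-file table-context map */
} InsertState;

/* Returns true when a request must still be sent to the server. */
static bool need_request(const InsertState *st) {
  if (st->fileOnly && st->pendingTables == 0) {
    return false;  /* nothing parsed from the file: skip merge and RPC entirely */
  }
  return true;     /* merge per vgroup and build the data blocks as usual */
}

int main(void) {
  InsertState emptyFile = {.fileOnly = true, .pendingTables = 0};
  InsertState normal    = {.fileOnly = false, .pendingTables = 0};
  printf("%d %d\n", need_request(&emptyFile), need_request(&normal));  /* prints 0 1 */
  return 0;
}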
@@ -2718,6 +2768,7 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal
       .msg = {.buf = pCxt->pMsg, .len = pCxt->msgLen},
       .missCache = false,
       .usingDuplicateTable = false,
+      .needRequest = true,
       .forceUpdate = (NULL != pCatalogReq ? pCatalogReq->forceUpdate : false)};
 
   int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery);
@@ -2732,5 +2783,10 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal
     code = setRefreshMeta(*pQuery);
   }
   insDestroyBoundColInfo(&context.tags);
+
+  // if no data to insert, set emptyMode to avoid request server
+  if (!context.needRequest) {
+    (*pQuery)->execMode = QUERY_EXEC_MODE_EMPTY_RESULT;
+  }
+
   return code;
 }
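Note on the final hunk: when needRequest stays false, the query is tagged QUERY_EXEC_MODE_EMPTY_RESULT so the client can answer with an empty result set instead of issuing a request. The sketch below illustrates that short-circuit with a hypothetical enum and dispatcher, not the actual scheduler code.

#include <stdio.h>

typedef enum { EXEC_MODE_SCHEDULE, EXEC_MODE_EMPTY_RESULT } ExecMode;

/* Dispatch a parsed query: an empty-result query never reaches the vnodes. */
static void execute(ExecMode mode) {
  if (mode == EXEC_MODE_EMPTY_RESULT) {
    printf("return empty result set, no RPC issued\n");
    return;
  }
  printf("schedule the query against the vnodes\n");
}

int main(void) {
  execute(EXEC_MODE_EMPTY_RESULT);  /* what the parser requests when needRequest is false */
  execute(EXEC_MODE_SCHEDULE);
  return 0;
}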
Some files were not shown because too many files have changed in this diff.