fix: conflicts from 3.0

wangmm0220 2023-12-08 15:58:52 +08:00
commit d1c2a03dc7
266 changed files with 11774 additions and 5440 deletions

.gitignore (vendored)

@@ -11,6 +11,7 @@ CMakeSettings.json
cmake-build-debug/
cmake-build-release/
cscope.out
+cscope.files
.DS_Store
debug/
release/


@@ -355,7 +355,7 @@ pipeline {
}
parallel {
stage('check docs') {
-agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_52 || worker03 || slave215 || slave217 || slave219 || Mac_catalina "}
+agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 || Mac_catalina "}
steps {
check_docs()
}
@@ -393,7 +393,7 @@ pipeline {
agent{label " Mac_catalina "}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
-timeout(time: 20, unit: 'MINUTES'){
+timeout(time: 30, unit: 'MINUTES'){
pre_test()
pre_test_build_mac()
}
@@ -401,7 +401,7 @@ pipeline {
}
}
stage('linux test') {
-agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_52 || worker03 || slave215 || slave217 || slave219 "}
+agent{label " slave1_47 || slave1_48 || slave1_49 || slave1_50 || slave1_52 || slave1_59 || slave1_63 || worker03 || slave215 || slave217 || slave219 "}
options { skipDefaultCheckout() }
when {
changeRequest()


@@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.0)
-set(CMAKE_VERBOSE_MAKEFILE FALSE)
+set(CMAKE_VERBOSE_MAKEFILE TRUE)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
#set output directory
@@ -159,6 +159,7 @@ ELSE ()
CHECK_C_COMPILER_FLAG("-mavx2" COMPILER_SUPPORT_AVX2)
CHECK_C_COMPILER_FLAG("-mavx512f" COMPILER_SUPPORT_AVX512F)
CHECK_C_COMPILER_FLAG("-mavx512vbmi" COMPILER_SUPPORT_AVX512BMI)
+CHECK_C_COMPILER_FLAG("-mavx512vl" COMPILER_SUPPORT_AVX512VL)
IF (COMPILER_SUPPORT_SSE42)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse4.2")
@@ -166,11 +167,11 @@ ELSE ()
ENDIF()
IF ("${SIMD_SUPPORT}" MATCHES "true")
-IF (COMPILER_SUPPORT_FMA)
+IF (COMPILER_SUPPORT_FMA)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfma")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfma")
-ENDIF()
-IF (COMPILER_SUPPORT_AVX)
+ENDIF()
+IF (COMPILER_SUPPORT_AVX)
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")
ENDIF()
@@ -180,11 +181,17 @@ ELSE ()
ENDIF()
MESSAGE(STATUS "SIMD instructions (FMA/AVX/AVX2) is ACTIVATED")
-IF (COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI)
-SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512vbmi")
-SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mavx512vbmi")
-MESSAGE(STATUS "avx512 supported by gcc")
-ENDIF()
+# IF (COMPILER_SUPPORT_AVX512F AND COMPILER_SUPPORT_AVX512BMI)
+# SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512f -mavx512vbmi")
+# SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512f -mavx512vbmi")
+# MESSAGE(STATUS "avx512f/avx512bmi supported by compiler")
+# ENDIF()
+#
+# IF (COMPILER_SUPPORT_AVX512VL)
+# SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mavx512vl")
+# SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512vl")
+# MESSAGE(STATUS "avx512vl supported by compiler")
+# ENDIF()
ENDIF()
# build mode


@@ -151,6 +151,7 @@ IF(${BUILD_S3})
IF(${BUILD_WITH_S3})
add_definitions(-DUSE_S3)
+option(BUILD_WITH_COS "If build with cos" OFF)
ELSE ()


@@ -243,7 +243,7 @@ You can use the TDengine CLI to monitor your TDengine deployment and execute ad
taos
```
-The TDengine CLI displays a welcome message and version information to indicate that its connection to the TDengine service was successful. If an error message is displayed, see the [FAQ](/train-faq/faq) for troubleshooting information. At the following prompt, you can execute SQL commands.
+The TDengine CLI displays a welcome message and version information to indicate that its connection to the TDengine service was successful. If an error message is displayed, see the [FAQ](../../train-faq/faq) for troubleshooting information. At the following prompt, you can execute SQL commands.
```cmd
taos>


@@ -10,7 +10,7 @@ Between official releases, beta versions may be released that contain new featur
<PkgList type={0}/>
-For information about installing TDengine, see [Install and Uninstall](/operation/pkg-install).
+For information about installing TDengine, see [Install and Uninstall](../../operation/pkg-install).
For information about TDengine releases, see [All Downloads](https://tdengine.com/all-downloads)


@@ -12,7 +12,7 @@ import StackOverflowSVG from './stackoverflow.svg'
You can install and run TDengine on Linux/Windows/macOS machines as well as Docker containers. You can also deploy TDengine as a managed service with TDengine Cloud.
-The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter).
+The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](../reference/rest-api) through [taosAdapter](../reference/taosadapter).
```mdx-code-block
import DocCardList from '@theme/DocCardList';


@@ -12,4 +12,4 @@ When using REST connection, the feature of bulk pulling can be enabled if the si
{{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
```
-For more information about connection configuration, please refer to [Java Connector](/reference/connector/java)
+For more information about connection configuration, please refer to [Java Connector](../../reference/connector/java)


@@ -22,7 +22,7 @@ import VerifyLinux from "../../14-reference/03-connector/_verify_linux.mdx";
import VerifyWindows from "../../14-reference/03-connector/_verify_windows.mdx";
import VerifyMacOS from "../../14-reference/03-connector/_verify_macos.mdx";
-Any application running on any platform can access TDengine through the REST API provided by TDengine. For information, see [REST API](/reference/rest-api/). Applications can also use the connectors for various programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. These connectors support connecting to TDengine clusters using the native interface (taosc). Some connectors also support connecting over a REST interface. Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
+Any application running on any platform can access TDengine through the REST API provided by TDengine. For information, see [REST API](../../reference/rest-api/). Applications can also use the connectors for various programming languages, including C/C++, Java, Python, Go, Node.js, C#, and Rust, to access TDengine. These connectors support connecting to TDengine clusters using the native interface (taosc). Some connectors also support connecting over a REST interface. Community developers have also contributed several unofficial connectors, such as the ADO.NET connector, the Lua connector, and the PHP connector.
## Establish Connection
@@ -36,7 +36,7 @@ For REST and native connections, connectors provide similar APIs for performing
Key differences:
3. The REST connection is more accessible with cross-platform support, however it results in a 30% performance downgrade.
-1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](/reference/connector/cpp#parameter-binding-api), [Subscription](/reference/connector/cpp#subscription-and-consumption-api), etc.
+1. The TDengine client driver (taosc) has the highest performance with all the features of TDengine like [Parameter Binding](../../reference/connector/cpp#parameter-binding-api), [Subscription](../../reference/connector/cpp#subscription-and-consumption-api), etc.
## Install Client Driver taosc


@@ -3,9 +3,9 @@ title: Data Model
description: This document describes the data model of TDengine.
---
-The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](/concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
+The data model employed by TDengine is similar to that of a relational database. You have to create databases and tables. You must design the data model based on your own business and application requirements. You should design the [STable](../../concept/#super-table-stable) (an abbreviation for super table) schema to fit your data. This chapter will explain the big picture without getting into syntactical details.
-Note: before you read this chapter, please make sure you have already read through [Key Concepts](/concept/), since TDengine introduces new concepts like "one table for one [data collection point](/concept/#data-collection-point)" and "[super table](/concept/#super-table-stable)".
+Note: before you read this chapter, please make sure you have already read through [Key Concepts](../../concept/), since TDengine introduces new concepts like "one table for one [data collection point](../../concept/#data-collection-point)" and "[super table](../../concept/#super-table-stable)".
## Create Database
@@ -22,7 +22,7 @@ In the above SQL statement:
- a new data file will be created every 10 days
- the size of the write cache pool on each VNode is 16 MB
- the number of vgroups is 100
-- WAL is enabled but fsync is disabled. For more details please refer to [Database](/taos-sql/database).
+- WAL is enabled but fsync is disabled. For more details please refer to [Database](../../taos-sql/database).
After creating a database, the current database in use can be switched using SQL command `USE`. For example the SQL statement below switches the current database to `power`.
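For example:

```sql
USE power;
```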
@@ -41,13 +41,13 @@ Without the current database specified, table name must be preceded with the cor
## Create STable
-In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](/concept/#model_table1), the SQL statement below can be used to create the super table.
+In a time-series application, there may be multiple kinds of data collection points. For example, in the electrical power system there are meters, transformers, bus bars, switches, etc. For easy and efficient aggregation of multiple tables, one STable needs to be created for each kind of data collection point. For example, for the meters in [table 1](../../concept/#model_table1), the SQL statement below can be used to create the super table.
```sql
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
```
-Similar to creating a regular table, when creating a STable, the name and schema need to be provided. In the STable schema, the first column must always be a timestamp (like ts in the example), and the other columns (like current, voltage and phase in the example) are the data collected. The remaining columns can [contain data of type](/taos-sql/data-type/) integer, float, double, string etc. In addition, the schema for tags, like location and groupId in the example, must be provided. The tag type can be integer, float, string, etc. Tags are essentially the static properties of a data collection point. For example, properties like the location, device type, device group ID, manager ID are tags. Tags in the schema can be added, removed or updated. Please refer to [STable](/taos-sql/stable) for more details.
+Similar to creating a regular table, when creating a STable, the name and schema need to be provided. In the STable schema, the first column must always be a timestamp (like ts in the example), and the other columns (like current, voltage and phase in the example) are the data collected. The remaining columns can [contain data of type](../../taos-sql/data-type/) integer, float, double, string etc. In addition, the schema for tags, like location and groupId in the example, must be provided. The tag type can be integer, float, string, etc. Tags are essentially the static properties of a data collection point. For example, properties like the location, device type, device group ID, manager ID are tags. Tags in the schema can be added, removed or updated. Please refer to [STable](../../taos-sql/stable) for more details.
For each kind of data collection point, a corresponding STable must be created. There may be many STables in an application. For electrical power system, we need to create a STable respectively for meters, transformers, busbars, switches. There may be multiple kinds of data collection points on a single device, for example there may be one data collection point for electrical data like current and voltage and another data collection point for environmental data like temperature, humidity and wind direction. Multiple STables are required for these kinds of devices.
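As a sketch of that last case, a second STable for the environmental data collection point on such a device might look as follows (the measurement column names here are assumptions, not taken from the original example):

```sql
-- Hypothetical STable for the environmental metrics collected on the same device
CREATE STABLE environment (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, wind_direction INT) TAGS (location BINARY(64), groupId INT);
```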
@@ -61,7 +61,7 @@ A specific table needs to be created for each data collection point. Similar to
CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
```
-In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "California.SanFrancisco" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](/taos-sql/table) for details.
+In the above SQL statement, "d1001" is the table name, "meters" is the STable name, followed by the value of tag "Location" and the value of tag "groupId", which are "California.SanFrancisco" and "2" respectively in the example. The tag values can be updated after the table is created. Please refer to [Tables](../../taos-sql/table) for details.
It's suggested to use the globally unique ID of a data collection point as the table name. For example the device serial number could be used as a unique ID. If a unique ID doesn't exist, multiple IDs that are not globally unique can be combined to form a globally unique ID. It's not recommended to use a globally unique ID as tag value.
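For instance, with a hypothetical device serial number standing in for the globally unique ID:

```sql
-- "sn10010001" stands in for the device's globally unique serial number
CREATE TABLE sn10010001 USING meters TAGS ("California.SanFrancisco", 2);
```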
@@ -75,7 +75,7 @@ INSERT INTO d1001 USING meters TAGS ("California.SanFrancisco", 2) VALUES (now,
In the above SQL statement, a row with value `(now, 10.2, 219, 0.32)` will be inserted into table "d1001". If table "d1001" doesn't exist, it will be created automatically using STable "meters" as template with tag value `"California.SanFrancisco", 2`.
-For more details please refer to [Create Table Automatically](/taos-sql/insert#automatically-create-table-when-inserting).
+For more details please refer to [Create Table Automatically](../../taos-sql/insert#automatically-create-table-when-inserting).
## Single Column vs Multiple Column


@@ -33,7 +33,7 @@ The below SQL statement is used to insert one row into table "d1001".
INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31);
```
-`ts1` is a Unix timestamp; only timestamps newer than the current time minus the KEEP parameter of the database are allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1` is a Unix timestamp; only timestamps newer than the current time minus the KEEP parameter of the database are allowed. For further detail, refer to [TDengine SQL insert timestamp section](../../../taos-sql/insert).
### Insert Multiple Rows
@@ -43,7 +43,7 @@ Multiple rows can be inserted in a single SQL statement. The example below inser
INSERT INTO d1001 VALUES (ts1, 10.2, 220, 0.23) (ts2, 10.3, 218, 0.25);
```
-`ts1` and `ts2` are Unix timestamps; only timestamps newer than the current time minus the KEEP parameter of the database are allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1` and `ts2` are Unix timestamps; only timestamps newer than the current time minus the KEEP parameter of the database are allowed. For further detail, refer to [TDengine SQL insert timestamp section](../../../taos-sql/insert).
### Insert into Multiple Tables
@@ -53,9 +53,9 @@ Data can be inserted into multiple tables in the same SQL statement. The example
INSERT INTO d1001 VALUES (ts1, 10.3, 219, 0.31) (ts2, 12.6, 218, 0.33) d1002 VALUES (ts3, 12.3, 221, 0.31);
```
-`ts1`, `ts2` and `ts3` are Unix timestamps; only timestamps newer than the current time minus the KEEP parameter of the database are allowed. For further detail, refer to [TDengine SQL insert timestamp section](/taos-sql/insert).
+`ts1`, `ts2` and `ts3` are Unix timestamps; only timestamps newer than the current time minus the KEEP parameter of the database are allowed. For further detail, refer to [TDengine SQL insert timestamp section](../../../taos-sql/insert).
-For more details about `INSERT` please refer to [INSERT](/taos-sql/insert).
+For more details about `INSERT` please refer to [INSERT](../../../taos-sql/insert).
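As a concrete sketch with the placeholder timestamps filled in (the values are illustrative and must be newer than the current time minus the KEEP period):

```sql
INSERT INTO d1001 VALUES ('2023-12-08 15:58:52.000', 10.3, 219, 0.31) ('2023-12-08 15:58:53.000', 12.6, 218, 0.33)
       d1002 VALUES ('2023-12-08 15:58:54.000', 12.3, 221, 0.31);
```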
:::info


@@ -35,7 +35,7 @@ bin/kafka-topics.sh --bootstrap-server=localhost:9092 --describe
## Insert into TDengine
-We can write data into TDengine via SQL or Schemaless. For more information, please refer to [Insert Using SQL](/develop/insert-data/sql-writing/) or [High Performance Writing](/develop/insert-data/high-volume/) or [Schemaless Writing](/reference/schemaless/).
+We can write data into TDengine via SQL or Schemaless. For more information, please refer to [Insert Using SQL](../sql-writing/) or [High Performance Writing](../high-volume/) or [Schemaless Writing](../../../reference/schemaless/).
## Examples


@@ -46,7 +46,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
:::
-For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
+For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](../../../reference/schemaless/#Schemaless-Line-Protocol)
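After a schemaless write, the automatically created super table can be inspected from the TDengine CLI (a sketch; `meters` is the measurement name used in the example above):

```sql
SHOW STABLES;
DESCRIBE meters;
```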
## Examples


@@ -128,7 +128,7 @@ For more information, see [Aggregate by Window](../../taos-sql/distinguished).
### Query
-In the section describing [Insert](/develop/insert-data/sql-writing), a database named `power` is created and some data are inserted into STable `meters`. The sample code below demonstrates how to query the data in this STable.
+In the section describing [Insert](../insert-data/sql-writing), a database named `power` is created and some data are inserted into STable `meters`. The sample code below demonstrates how to query the data in this STable.
<Tabs defaultValue="java" groupId="lang">
<TabItem label="Java" value="java">


@@ -6,7 +6,7 @@ description: This document describes how to use the various components of TDengi
Before creating an application to process time-series data with TDengine, consider the following:
1. Choose the method to connect to TDengine. TDengine offers a REST API that can be used with any programming language. It also has connectors for a variety of languages.
-2. Design the data model based on your own use cases. Consider the main [concepts](/concept/) of TDengine, including "one table per data collection point" and the supertable. Learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, decide whether to create one or more databases, and design a supertable schema that fits your data.
+2. Design the data model based on your own use cases. Consider the main [concepts](../concept/) of TDengine, including "one table per data collection point" and the supertable. Learn about static labels, collected metrics, and subtables. Depending on the characteristics of your data and your requirements, decide whether to create one or more databases, and design a supertable schema that fits your data.
3. Decide how you will insert data. TDengine supports writing using standard SQL, but also supports schemaless writing, so that data can be written directly without creating tables manually.
4. Based on business requirements, find out what SQL query statements need to be written. You may be able to repurpose any existing SQL.
5. If you want to run real-time analysis based on time series data, including various dashboards, use the TDengine stream processing component instead of deploying complex systems such as Spark or Flink.
@@ -14,7 +14,7 @@ Before creating an application to process time-series data with TDengine, consid
7. In many use cases (such as fleet management), the application needs to obtain the latest status of each data collection point. It is recommended that you use the cache function of TDengine instead of deploying Redis separately.
8. If you find that the SQL functions of TDengine cannot meet your requirements, then you can use user-defined functions to solve the problem.
-This section is organized in the order described above. For ease of understanding, TDengine provides sample code for each supported programming language for each function. If you want to learn more about the use of SQL, please read the [SQL manual](/taos-sql/). For a more in-depth understanding of the use of each connector, please read the [Connector Reference Guide](/reference/connector/). If you also want to integrate TDengine with third-party systems, such as Grafana, please refer to the [third-party tools](/third-party/).
+This section is organized in the order described above. For ease of understanding, TDengine provides sample code for each supported programming language for each function. If you want to learn more about the use of SQL, please read the [SQL manual](../taos-sql/). For a more in-depth understanding of the use of each connector, please read the [Connector Reference Guide](../reference/connector/). If you also want to integrate TDengine with third-party systems, such as Grafana, please refer to the [third-party tools](../third-party/).
If you encounter any problems during the development process, please click ["Submit an issue"](https://github.com/taosdata/TDengine/issues/new/choose) at the bottom of each page and submit it on GitHub right away.


@@ -72,7 +72,7 @@ For all the dnodes in a TDengine cluster, the below parameters must be configure
## Start Cluster
-The first dnode can be started following the instructions in [Get Started](/get-started/). Then the TDengine CLI `taos` can be launched to execute the command `show dnodes`; for example, the output is as follows:
+The first dnode can be started following the instructions in [Get Started](../../get-started/). Then the TDengine CLI `taos` can be launched to execute the command `show dnodes`; for example, the output is as follows:
```
taos> show dnodes;
@@ -90,7 +90,7 @@ From the above output, it is shown that the end point of the started dnode is "h
There are a few steps necessary to add other dnodes in the cluster.
-Second, we can start `taosd` as instructed in [Get Started](/get-started/).
+Second, we can start `taosd` as instructed in [Get Started](../../get-started/).
Then, on the first dnode i.e. h1.tdengine.com in our example, use TDengine CLI `taos` to execute the following command:
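The command registers the new dnode's endpoint with the cluster; following the `h1`/`h2` naming of this example (the exact endpoint is an assumption), it is along these lines:

```sql
CREATE DNODE "h2.tdengine.com:6030";
```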


@@ -26,7 +26,6 @@ database_option: {
| PAGESIZE value
| PRECISION {'ms' | 'us' | 'ns'}
| REPLICA value
-| RETENTIONS ingestion_duration:keep_duration ...
| WAL_LEVEL {1 | 2}
| VGROUPS value
| SINGLE_STABLE {0 | 1}
@@ -61,7 +60,6 @@ database_option: {
- PAGESIZE: specifies the size (in KB) of each page in the metadata storage engine cache on each vnode. The default value is 4. Enter a value between 1 and 16384.
- PRECISION: specifies the precision at which a database records timestamps. Enter ms for milliseconds, us for microseconds, or ns for nanoseconds. The default value is ms.
- REPLICA: specifies the number of replicas that are made of the database. Enter 1 or 3. The default value is 1. The value of the REPLICA parameter cannot exceed the number of dnodes in the cluster.
-- RETENTIONS: specifies the retention period for data aggregated at various intervals. For example, RETENTIONS 15s:7d,1m:21d,15m:50d indicates that data aggregated every 15 seconds is retained for 7 days, data aggregated every 1 minute is retained for 21 days, and data aggregated every 15 minutes is retained for 50 days. You must enter three aggregation intervals and corresponding retention periods.
- WAL_LEVEL: specifies whether fsync is enabled. The default value is 1.
- 1: WAL is enabled but fsync is disabled.
- 2: WAL and fsync are both enabled.
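A sketch combining several of the options above (all values are illustrative):

```sql
CREATE DATABASE power PRECISION 'ms' REPLICA 1 WAL_LEVEL 2 VGROUPS 4 PAGESIZE 8;
```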


@@ -32,9 +32,6 @@ table_options:
table_option: {
COMMENT 'string_value'
-| WATERMARK duration[,duration]
-| MAX_DELAY duration[,duration]
-| ROLLUP(func_name [, func_name] ...)
| SMA(col_name [, col_name] ...)
| TTL value
}
@@ -54,11 +51,8 @@ table_option: {
**Parameter description**
1. COMMENT: specifies comments for the table. This parameter can be used with supertables, standard tables, and subtables.
-2. WATERMARK: specifies the time after which the window is closed. The default value is 5 seconds. Enter a value between 0 and 15 minutes in milliseconds, seconds, or minutes. You can enter multiple values separated by commas (,). This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database.
-3. MAX_DELAY: specifies the maximum latency for pushing computation results. The default value is 15 minutes or the value of the INTERVAL parameter, whichever is smaller. Enter a value between 0 and 15 minutes in milliseconds, seconds, or minutes. You can enter multiple values separated by commas (,). Note: Retain the default value if possible. Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database.
-4. ROLLUP: specifies aggregate functions to roll up. Rolling up a function provides downsampled results based on multiple axes. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. You can specify only one function to roll up. The rollup takes effect on all columns except TS. Enter one of the following values: avg, sum, min, max, last, or first.
-5. SMA: specifies columns on which to enable small materialized aggregates (SMA). SMA is user-defined precomputation of aggregates based on data blocks. Enter one of the following values: max, min, or sum. This parameter can be used with supertables and standard tables.
-6. TTL: specifies the time to live (TTL) for the table. If TTL is specified when creating a table, TDengine automatically deletes the table once it has existed for the TTL period. Note that the system may not delete the table at the exact moment the TTL expires, but it guarantees that the table will eventually be deleted. The unit of TTL is days. The default value is 0, i.e. never expire.
+2. SMA: specifies columns on which to enable small materialized aggregates (SMA). SMA is user-defined precomputation of aggregates based on data blocks. Enter one of the following values: max, min, or sum. This parameter can be used with supertables and standard tables.
+3. TTL: specifies the time to live (TTL) for the table. If TTL is specified when creating a table, TDengine automatically deletes the table once it has existed for the TTL period. Note that the system may not delete the table at the exact moment the TTL expires, but it guarantees that the table will eventually be deleted. The unit of TTL is days. The default value is 0, i.e. never expire.
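A sketch of the remaining options in use (table and column names are assumptions):

```sql
-- Enable small materialized aggregates on "current"; delete the table 90 days after creation
CREATE TABLE sensor_01 (ts TIMESTAMP, current FLOAT, voltage INT) COMMENT 'demo table' SMA(current) TTL 90;
```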
## Create Subtables


@@ -53,7 +53,7 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
```
-For more information about user-defined functions, see [User-Defined Functions](/develop/udf).
+For more information about user-defined functions, see [User-Defined Functions](../../develop/udf).
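Once registered, the function is invoked like a built-in aggregate. For example, with the `l2norm` function created above (the `meters` table is from the earlier examples):

```sql
SELECT l2norm(current) FROM meters;
```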
## Manage UDF


@@ -41,7 +41,7 @@ Launch `TDinsight.sh` with the command above and restart Grafana, then open Dash
## log database
-The data of the TDinsight dashboard is stored in the `log` database by default (you can change this in taoskeeper's config file; for more information, please refer to the [taoskeeper document](/reference/taosKeeper)). taoskeeper will create the log database on startup.
+The data of the TDinsight dashboard is stored in the `log` database by default (you can change this in taoskeeper's config file; for more information, please refer to the [taoskeeper document](../../reference/taosKeeper)). taoskeeper will create the log database on startup.
### cluster\_info table


@@ -514,4 +514,4 @@ Response body:
## Reference
-[taosAdapter](/reference/taosadapter/)
+[taosAdapter](../taosadapter/)


@@ -24,7 +24,7 @@ The dynamic libraries for the TDengine client driver are located in.
## Supported platforms
-Please refer to [list of supported platforms](/reference/connector#supported-platforms)
+Please refer to [list of supported platforms](../#supported-platforms)
## Supported versions
@@ -32,7 +32,7 @@ The version number of the TDengine client driver and the version number of the T
## Installation Steps
-Please refer to the [Installation Steps](/reference/connector#installation-steps) for TDengine client driver installation
+Please refer to the [Installation Steps](../#installation-steps) for TDengine client driver installation
## Establishing a connection
@@ -394,7 +394,7 @@ The specific functions related to the interface are as follows (see also the [pr
### Schemaless Writing API
-In addition to writing data using the SQL method or the parameter binding API, writing can also be done using schemaless writing, which eliminates the need to create a super table/data sub-table structure in advance and writes the data directly. The TDengine system automatically creates and maintains the required table structure based on the written data content. The use of schemaless writing is described in the chapter [Schemaless Writing](/reference/schemaless/), and the C/C++ API used with it is described here.
+In addition to writing data using the SQL method or the parameter binding API, writing can also be done using schemaless writing, which eliminates the need to create a super table/data sub-table structure in advance and writes the data directly. The TDengine system automatically creates and maintains the required table structure based on the written data content. The use of schemaless writing is described in the chapter [Schemaless Writing](../../schemaless/), and the C/C++ API used with it is described here.
- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)`


@@ -148,7 +148,7 @@ TDengine currently supports timestamp, number, character, Boolean type, and the
**Note**: Only TAG supports JSON types
Due to historical reasons, the BINARY type data in TDengine is not truly binary data and is no longer recommended for use. Please use VARBINARY type instead.
-GEOMETRY type is binary data in little endian byte order, which complies with the WKB specification. For detailed information, please refer to [Data Type] (/tao-sql/data-type/#Data Types)
+GEOMETRY type is binary data in little endian byte order, which complies with the WKB specification. For detailed information, please refer to [Data Type](../../../taos-sql/data-type/)
For WKB specifications, please refer to [Well Known Binary (WKB)](https://libgeos.org/specifications/wkb/)
For Java connector, the jts library can be used to easily create GEOMETRY type objects, serialize them, and write them to TDengine. Here is an example [Geometry example](https://github.com/taosdata/TDengine/blob/3.0/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/GeometryDemo.java)
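On the SQL side, GEOMETRY values can be written as WKT text and are stored as WKB; a sketch (table and column names are assumptions):

```sql
CREATE TABLE geo_demo (ts TIMESTAMP, geom GEOMETRY(128));
INSERT INTO geo_demo VALUES (now, 'POINT(121.47 31.23)');
```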
@@ -160,7 +160,7 @@ For Java connector, the jts library can be used to easily create GEOMETRY type o
Before using Java Connector to connect to the database, the following conditions are required.
- Java 1.8 or above runtime environment and Maven 3.6 or above installed
-- TDengine client driver installed (required for native connections, not required for REST connections), please refer to [Installing Client Driver](/reference/connector#Install-Client-Driver)
+- TDengine client driver installed (required for native connections, not required for REST connections), please refer to [Installing Client Driver](../#Install-Client-Driver)
### Install the connectors
@@ -368,7 +368,7 @@ The configuration parameters in properties are as follows.
- TSDBDriver.PROPERTY_KEY_MESSAGE_WAIT_TIMEOUT: message transmission timeout in milliseconds, the default value is 60000 ms. It only takes effect when using JDBC REST connection and batchfetch is true.
- TSDBDriver.PROPERTY_KEY_USE_SSL: connecting Securely Using SSL. true: using SSL connection, false: not using SSL connection. It only takes effect when using JDBC REST connection.
- TSDBDriver.HTTP_POOL_SIZE: size of REST concurrent requests. The default value is 20.
-For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](/reference/config/#Client-Only).
+For JDBC native connections, you can specify other parameters, such as log level, SQL length, etc., by specifying URL and Properties. For more detailed configuration, please refer to [Client Configuration](../../config/#Client-Only).
### Priority of configuration parameters


@@ -74,7 +74,7 @@ If it is a TDengine error, you can get the error code and error information in t
### Pre-installation preparation
* Install Go development environment (Go 1.14 and above, GCC 4.8.5 and above)
-* If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector/#install-client-driver) for specific steps
+* If you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](../#install-client-driver) for specific steps
Configure the environment variables and check the command.


@@ -80,7 +80,7 @@ Note: Only TAG supports JSON types
### Pre-installation preparation
* Install the Rust development toolchain
-* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver)
+* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](../#install-client-driver)
### Install the connectors


@@ -7,7 +7,7 @@ description: This document describes taospy, the TDengine Python connector.
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
+`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](../cpp) and [REST interface](../../rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
`taos-ws-py` is an optional package to enable using WebSocket to connect TDengine.
@@ -17,7 +17,7 @@ The direct connection to the server using the native interface provided by the c
The source code for the Python connector is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).
## Supported platforms
-- The [supported platforms](/reference/connector/#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
+- The [supported platforms](../#supported-platforms) for the native connection are the same as the ones supported by the TDengine client.
- REST connections are supported on all platforms that can run Python.
### Supported features
@@ -95,7 +95,7 @@ TDengine currently supports timestamp, number, character, Boolean type, and the
1. Install Python. The recent taospy package requires Python 3.6.2+. The earlier versions of taospy require Python 3.7+. The taos-ws-py package requires Python 3.7+. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.
-If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
+If you use a native connection, you will also need to [Install Client Driver](../#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
### Install via pip
@@ -444,7 +444,7 @@ The best practice for TaosCursor is to create a cursor at the beginning of a que
##### Use of the RestClient class
-The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
+The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
```python title="Use of RestClient"
{{#include docs/examples/python/rest_client_example.py}}
@@ -501,7 +501,7 @@ The queried results can only be fetched once. For example, only one of `fetch_al
<TabItem value="rest" label="REST connection">
-The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
+The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
```python
{{#include docs/examples/python/rest_client_example.py}}
@@ -561,7 +561,7 @@ The `TaosConnection` class and the `TaosResult` class already implement all the
##### Use of the RestClient class
-The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
+The `RestClient` class is a direct wrapper for the [REST API](../../rest-api). It contains only a `sql()` method for executing arbitrary SQL statements and returning the result.
```python title="Use of RestClient"
{{#include docs/examples/python/rest_client_with_req_id_example.py}}


@@ -28,7 +28,7 @@ The REST connector supports all platforms that can run Node.js.
## Version support
-Please refer to [version support list](/reference/connector#version-support)
+Please refer to [version support list](../#version-support)
## Supported features
@@ -58,7 +58,7 @@ Please refer to [version support list](/reference/connector#version-support)
### Pre-installation preparation
- Install the Node.js development environment
-- If you are using the REST connector, skip this step. However, if you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](/reference/connector#Install-Client-Driver) for more details. We use [node-gyp](https://github.com/nodejs/node-gyp) to interact with TDengine instances and also need to install some dependencies mentioned below depending on the specific OS.
+- If you are using the REST connector, skip this step. However, if you use the native connector, please install the TDengine client driver. Please refer to [Install Client Driver](../#Install-Client-Driver) for more details. We use [node-gyp](https://github.com/nodejs/node-gyp) to interact with TDengine instances and also need to install some dependencies mentioned below depending on the specific OS.
<Tabs defaultValue="Linux">
<TabItem value="Linux" label="Linux system installation dependencies">


@@ -36,7 +36,7 @@ Please note TDengine does not support 32bit Windows any more.
## Version support
-Please refer to [version support list](/reference/connector#version-support)
+Please refer to [version support list](../#version-support)
## Supported features
@@ -69,7 +69,7 @@ Please refer to [version support list](/reference/connector#version-support)
* Install the [.NET SDK](https://dotnet.microsoft.com/download)
* [Nuget Client](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (optional installation)
-* Install TDengine client driver, please refer to [Install client driver](/reference/connector/#install-client-driver) for details
+* Install TDengine client driver, please refer to [Install client driver](../#install-client-driver) for details
### Install `TDengine.Connector`


@@ -40,7 +40,7 @@ Because the version of TDengine client driver is tightly associated with that of
### Install TDengine Client Driver
-Regarding how to install TDengine client driver please refer to [Install Client Driver](/reference/connector#installation-steps)
+Regarding how to install TDengine client driver please refer to [Install Client Driver](../#installation-steps)
### Install php-tdengine


@@ -2,7 +2,7 @@
:::info
-Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding Windows client, please refer to [Install TDengine](../../get-started/package).
+Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](../../get-started/). For Windows development, you need to install the corresponding Windows client, please refer to [Install TDengine](../../get-started/package).
- libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately.
- taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately.


@@ -186,7 +186,7 @@ See [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/bl
### TDengine RESTful interface
-You can use any client that supports the http protocol to write data to or query data from TDengine by accessing the REST interface address `http://<fqdn>:6041/rest/sql`. See the [official documentation](/reference/rest-api/) for details.
+You can use any client that supports the http protocol to write data to or query data from TDengine by accessing the REST interface address `http://<fqdn>:6041/rest/sql`. See the [official documentation](../rest-api/) for details.
### InfluxDB
@@ -202,7 +202,7 @@ Support InfluxDB query parameters as follows.
- `precision` The time precision used by TDengine
- `u` TDengine user name
- `p` TDengine password
-- `ttl` The time to live of automatically created sub-table. This value cannot be updated. TDengine will use the ttl value of the first data of sub-table to create sub-table. For more information, please refer [Create Table](/taos-sql/table/#create-table)
+- `ttl` The time to live of automatically created sub-table. This value cannot be updated. TDengine will use the ttl value of the first data of sub-table to create sub-table. For more information, please refer [Create Table](../../taos-sql/table/#create-table)
Note: InfluxDB token authorization is not supported at present. Only Basic authorization and query parameter validation are supported.
Example: curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"


@@ -31,7 +31,7 @@ There are two ways to install taosdump:
2. backup multiple specified databases: use `-D db1,db2,... ` parameters;
3. back up some super or normal tables in the specified database: use `dbname stbname1 stbname2 tbname1 tbname2 ... ` parameters. Note that the first parameter of this input sequence is the database name, and only one database is supported. The second and subsequent parameters are the names of super or normal tables in that database, separated by spaces.
4. back up the system log database: TDengine clusters usually contain a system database named `log`. The data in this database is the data that TDengine runs itself, and taosdump will not back up the log database by default. If users need to back up the log database, they can use the `-a` or `--allow-sys` command-line parameter.
-5. Loose mode backup: taosdump version 1.4.1 onwards provides `-n` and `-L` parameters for backing up data without using escape characters and "loose" mode, which can reduce the number of backups if table names, column names, tag names do not use escape characters. This can also reduce the backup data time and backup data footprint. If you are unsure about using `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](/taos-sql/escape) for a description of escaped characters.
+5. Loose mode backup: taosdump version 1.4.1 onwards provides `-n` and `-L` parameters for backing up data without using escape characters and "loose" mode, which can reduce the number of backups if table names, column names, tag names do not use escape characters. This can also reduce the backup data time and backup data footprint. If you are unsure about using `-n` and `-L` conditions, please use the default parameters for "strict" mode backup. See the [official documentation](../../taos-sql/escape) for a description of escaped characters.
:::tip
- taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s`, taosdump will only parse the schema.


@@ -8,7 +8,7 @@ The TDengine command-line interface (hereafter referred to as `TDengine CLI`) is
## Installation
-If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [Connector](/reference/connector/).
+If executed on the TDengine server-side, there is no need for additional installation steps to install TDengine CLI as it is already included and installed automatically. To run TDengine CLI in an environment where no TDengine server is running, the TDengine client installation package needs to be installed first. For details, please refer to [Connector](../connector/).
## Execution
@@ -18,7 +18,7 @@ To access the TDengine CLI, you can execute `taos` command-line utility from a t
taos
```
-TDengine CLI will display a welcome message and version information if it successfully connected to the TDengine service. If it fails, TDengine CLI will print an error message. See [FAQ](/train-faq/faq) to solve the problem of terminal connection failure to the server. The TDengine CLI prompts as follows:
+TDengine CLI will display a welcome message and version information if it successfully connected to the TDengine service. If it fails, TDengine CLI will print an error message. See [FAQ](../../train-faq/faq) to solve the problem of terminal connection failure to the server. The TDengine CLI prompts as follows:
```cmd
taos>


@@ -87,7 +87,7 @@ Ensure that your firewall rules do not block TCP port 6042 on any host in the c
| Protocol | Default Port | Description | How to configure |
| :------- | :----------- | :-------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------- |
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. | serverPort |
-| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
+| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](../taosadapter/) |
| TCP | 6043 | Service Port of taosKeeper | The parameter of taosKeeper |
| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters. |
| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters. |


@@ -116,7 +116,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
10. taos.cfg adds the smlTsDefaultName configuration (with a string value), which only works on the client side. After configuration, the time column name of the schemaless automatic table creation can be set through this configuration. If not configured, it defaults to _ts.
11. Super table names and child table names are case sensitive.
:::tip
-All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB (64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB (64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](../../taos-sql/limit) for specific constraints in this area.
:::
## Time resolution recognition


@@ -16,7 +16,7 @@ Prometheus data can be stored in TDengine via the `remote_write` interface with
To write Prometheus data to TDengine requires the following preparations.
- The TDengine cluster is deployed and functioning properly
-- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
+- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- Prometheus has been installed. Please refer to the [official documentation](https://prometheus.io/docs/prometheus/latest/installation/) for installing Prometheus
## Configuration steps


@@ -14,7 +14,7 @@ Telegraf's data can be written to TDengine by simply adding the output configura
To write Telegraf data to TDengine requires the following preparations.
- The TDengine cluster is deployed and functioning properly
-- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
+- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- Telegraf has been installed. Please refer to the [official documentation](https://docs.influxdata.com/telegraf/v1.22/install/) for Telegraf installation.
- Telegraf collects the running status measurements of current system. You can enable [input plugins](https://docs.influxdata.com/telegraf/v1.22/plugins/) to insert [other formats](https://docs.influxdata.com/telegraf/v1.24/data_formats/input/) data to Telegraf then forward to TDengine.
@@ -73,6 +73,6 @@ Query OK, 3 row(s) in set (0.013269s)
- TDengine takes influxdb format data and creates a unique ID for table names by this rule.
Users can configure the `smlChildTableName` parameter to generate specified table names if needed, and they must insert data in the specified format.
-For example, add `smlChildTableName=tname` in the taos.cfg file. Insert the data `st,tname=cpu1,t1=4 c1=3 1626006833639000000` and the table name will be cpu1. If multiple lines have the same tname but different tag_set values, the tag_set of the first line is used to create the table automatically and the other lines are ignored. Please refer to [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
+For example, add `smlChildTableName=tname` in the taos.cfg file. Insert the data `st,tname=cpu1,t1=4 c1=3 1626006833639000000` and the table name will be cpu1. If multiple lines have the same tname but different tag_set values, the tag_set of the first line is used to create the table automatically and the other lines are ignored. Please refer to [TDengine Schemaless](../../reference/schemaless/#Schemaless-Line-Protocol)
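To verify the resulting child-table name, the super table's `tbname` pseudocolumn can be queried (a sketch; `st` is the measurement name from the example line above):

```sql
SELECT DISTINCT tbname FROM st;
```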
:::


@@ -15,7 +15,7 @@ You can write the data collected by collectd to TDengine by simply modifying the
Writing collectd data to the TDengine requires several preparations.
- The TDengine cluster is deployed and running properly
-- taosAdapter is installed and running, please refer to [taosAdapter's manual](/reference/taosadapter) for details
+- taosAdapter is installed and running, please refer to [taosAdapter's manual](../../reference/taosadapter) for details
- collectd has been installed. Please refer to the [official documentation](https://collectd.org/download.shtml) to install collectd
## Configuration steps


@@ -14,7 +14,7 @@ You can write the data collected by icinga2 to TDengine by simply modifying the
To write icinga2 data to TDengine requires the following preparations.
- The TDengine cluster is deployed and working properly
-- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
+- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- icinga2 has been installed. Please refer to the [official documentation](https://icinga.com/docs/icinga-2/latest/doc/02-installation/) for icinga2 installation
## Configuration steps


@@ -14,7 +14,7 @@ You can write the data collected by TCollector to TDengine by simply changing th
To write data to the TDengine via TCollector requires the following preparations.
- The TDengine cluster has been deployed and is working properly
-- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
+- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- TCollector has been installed. Please refer to [official documentation](http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html#installation-of-tcollector) for TCollector installation
## Configuration steps


@ -82,7 +82,7 @@ Edit the resource configuration to add the key/value pairing for Authorization.
Basic cm9vdDp0YW9zZGF0YQ==
```
Please refer to the [TDengine REST API documentation](/reference/rest-api/) for details on authorization.
Please refer to the [TDengine REST API documentation](../../reference/rest-api/) for details on authorization.
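
As a quick standalone check of the header above (separate from the EMQX flow itself), the following hedged Python sketch sends one SQL statement to the TDengine REST API with the same Authorization token. It assumes taosAdapter is listening on the default port 6041 with the default credentials root:taosdata, whose Base64 encoding is the token shown.

```python
import requests

resp = requests.post(
    "http://localhost:6041/rest/sql",
    headers={"Authorization": "Basic cm9vdDp0YW9zZGF0YQ=="},  # root:taosdata
    data="show databases",
)
print(resp.json())
```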
Enter the rule engine replacement template in the message body:


@ -94,7 +94,7 @@ The output is as below:
The role of the TDengine Sink Connector is to synchronize the data of the specified topic to TDengine. Users do not need to create databases and super tables in advance. The name of the target database can be specified manually (see the configuration parameter connection.database), or it can be generated according to specific rules (see the configuration parameter connection.database.prefix).
The TDengine Sink Connector internally uses the TDengine [modeless write interface](/reference/connector/cpp#modeless-write-api) to write data to TDengine and currently supports data in three formats: [InfluxDB line protocol format](/develop/insert-data/influxdb-line), [OpenTSDB Telnet protocol format](/develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json).
The TDengine Sink Connector internally uses the TDengine [modeless write interface](../../reference/connector/cpp#modeless-write-api) to write data to TDengine and currently supports data in three formats: [InfluxDB line protocol format](../../develop/insert-data/influxdb-line), [OpenTSDB Telnet protocol format](../../develop/insert-data/opentsdb-telnet), and [OpenTSDB JSON protocol format](../../develop/insert-data/opentsdb-json).
The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.
@ -213,7 +213,7 @@ If you see the above data, the synchronization is successful. If not, check the
The role of the TDengine Source Connector is to push all the data of a specific TDengine database after a particular time to Kafka. The implementation principle of TDengine Source Connector is to first pull historical data in batches and then synchronize incremental data with the strategy of the regular query. At the same time, the changes in the table will be monitored, and the newly added table can be automatically synchronized. If Kafka Connect is restarted, synchronization will resume where it left off.
The TDengine Source Connector converts the data in TDengine data tables into [InfluxDB Line protocol format](/develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](/develop/insert-data/opentsdb-json) and then writes it to Kafka.
The TDengine Source Connector converts the data in TDengine data tables into [InfluxDB Line protocol format](../../develop/insert-data/influxdb-line/) or [OpenTSDB JSON protocol format](../../develop/insert-data/opentsdb-json) and then writes it to Kafka.
The following sample program synchronizes the data in the database test to the topic tdengine-test-meters.


@ -14,4 +14,4 @@ Check the running status of taosAdapter.
systemctl status taosadapter
```
Please refer to the `taosadapter --help` command output and the [reference documentation](/reference/taosadapter) for detailed configuration parameters and usage of taosAdapter.
Please refer to the `taosadapter --help` command output and the [reference documentation](../../reference/taosadapter) for detailed configuration parameters and usage of taosAdapter.


@ -41,7 +41,7 @@ Download and install the [latest version of TDengine](https://docs.tdengine.com/
### Install Grafana Plugin and Configure Data Source
Please refer to [Install Grafana Plugin and Configure Data Source](/third-party/grafana/#install-grafana-plugin-and-configure-data-source)
Please refer to [Install Grafana Plugin and Configure Data Source](../../third-party/grafana/#install-grafana-plugin-and-configure-data-source)
### Modify /etc/telegraf/telegraf.conf


@ -44,7 +44,7 @@ Download and install the [latest version of TDengine](https://docs.tdengine.com/
### Install Grafana Plugin and Configure Data Source
Please refer to [Install Grafana Plugin and Configure Data Source](/third-party/grafana/#install-grafana-plugin-and-configure-data-source)
Please refer to [Install Grafana Plugin and Configure Data Source](../../third-party/grafana/#install-grafana-plugin-and-configure-data-source)
### Configure collectd


@ -70,7 +70,7 @@ You can use collectd and push the data to taosAdapter utilizing the write_tsdb p
- **Tuning the Dashboard system**
After the data is written to TDengine, you can configure Grafana to visualize it. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](/third-party/grafana).
After the data is written to TDengine, you can configure Grafana to visualize it. To obtain and use the Grafana plugin provided by TDengine, please refer to [Links to other tools](../../third-party/grafana).
TDengine provides two sets of Dashboard templates by default, and users only need to import the templates from the Grafana directory into Grafana to activate their use.
@ -396,7 +396,7 @@ Hard disk writing performance has little effect on TDengine. The TDengine writin
### Computational resource estimates
Due to the characteristics of IoT data, when the frequency of data generation is consistent, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](/operation/) description, the system consumes less than 1 CPU core at 22,000 writes per second.
Due to the characteristics of IoT data, when the frequency of data generation is consistent, the writing process of TDengine maintains a relatively fixed amount of resource consumption (computing and storage). According to the [TDengine Operation and Maintenance Guide](../../operation/) description, the system consumes less than 1 CPU core at 22,000 writes per second.
In estimating the CPU resources consumed by queries, assume the application requires the database to provide 10,000 QPS and each query consumes about 1 ms of CPU time. Each core then provides 1,000 QPS, so satisfying 10,000 QPS requires at least 10 cores. For the system as a whole to stay below 50% CPU load, the cluster needs twice as many cores, i.e. 20 cores.
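
The arithmetic above can be restated as a two-line calculation; the sketch below simply re-derives the 10-core and 20-core figures from the stated assumptions (10,000 QPS target, roughly 1 ms of CPU per query).

```python
target_qps = 10_000
cpu_ms_per_query = 1.0

qps_per_core = 1_000 / cpu_ms_per_query            # one core sustains 1,000 QPS
cores_at_full_load = target_qps / qps_per_core     # 10 cores at 100% load
cores_at_half_load = 2 * cores_at_full_load        # 20 cores to stay under 50%
print(cores_at_full_load, cores_at_half_load)
```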


@ -20,7 +20,7 @@ def get_ts(ts: str):
def create_stable():
conn = taos.connect()
try:
conn.execute("CREATE DATABASE power")
conn.execute("CREATE DATABASE power keep 36500")
conn.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
"TAGS (location BINARY(64), groupId INT)")
finally:


@ -4,7 +4,7 @@ import taos
taos_conn = taos.connect()
taos_conn.execute('drop database if exists power')
taos_conn.execute('create database if not exists power wal_retention_period 3600')
taos_conn.execute('create database if not exists power wal_retention_period 3600 keep 36500 ')
taos_conn.execute("use power")
taos_conn.execute(
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")


@ -11,7 +11,7 @@ conn = connect(url="http://localhost:6041",
# create STable
cursor = conn.cursor()
cursor.execute("DROP DATABASE IF EXISTS power")
cursor.execute("CREATE DATABASE power")
cursor.execute("CREATE DATABASE power keep 36500 ")
cursor.execute(
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")


@ -11,7 +11,7 @@ conn = connect(url="http://localhost:6041",
# create STable
cursor = conn.cursor()
cursor.execute("DROP DATABASE IF EXISTS power", req_id=1)
cursor.execute("CREATE DATABASE power", req_id=2)
cursor.execute("CREATE DATABASE power keep 36500", req_id=2)
cursor.execute(
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)", req_id=3)


@ -6,13 +6,13 @@ conn = taosws.connect("taosws://root:taosdata@localhost:6041")
# ANCHOR: basic
conn.execute("drop database if exists connwspy")
conn.execute("create database if not exists connwspy wal_retention_period 3600")
conn.execute("create database if not exists connwspy wal_retention_period 3600 keep 36500 ")
conn.execute("use connwspy")
conn.execute("create table if not exists stb (ts timestamp, c1 int) tags (t1 int)")
conn.execute("create table if not exists tb1 using stb tags (1)")
conn.execute("insert into tb1 values (now, 1)")
conn.execute("insert into tb1 values (now, 2)")
conn.execute("insert into tb1 values (now, 3)")
conn.execute("insert into tb1 values (now+1s, 2)")
conn.execute("insert into tb1 values (now+2s, 3)")
r = conn.execute("select * from stb")
result = conn.query("select * from stb")


@ -6,7 +6,7 @@ conn = taosws.connect("taosws://root:taosdata@localhost:6041")
# ANCHOR: basic
conn.execute("drop database if exists connwspy", req_id=1)
conn.execute("create database if not exists connwspy", req_id=2)
conn.execute("create database if not exists connwspy keep 36500", req_id=2)
conn.execute("use connwspy", req_id=3)
conn.execute("create table if not exists stb (ts timestamp, c1 int) tags (t1 int)", req_id=4)
conn.execute("create table if not exists tb1 using stb tags (1)", req_id=5)


@ -4,7 +4,7 @@ import taos
conn = taos.connect()
# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
conn.execute("DROP DATABASE IF EXISTS test")
conn.execute("CREATE DATABASE test")
conn.execute("CREATE DATABASE test keep 36500")
# change database. same as execute "USE db"
conn.select_db("test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")


@ -4,7 +4,7 @@ import taos
conn = taos.connect()
# Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement.
conn.execute("DROP DATABASE IF EXISTS test", req_id=1)
conn.execute("CREATE DATABASE test", req_id=2)
conn.execute("CREATE DATABASE test keep 36500", req_id=2)
# change database. same as execute "USE db"
conn.select_db("test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)", req_id=3)


@ -4,7 +4,7 @@ conn = taos.connect()
cursor = conn.cursor()
cursor.execute("DROP DATABASE IF EXISTS test")
cursor.execute("CREATE DATABASE test")
cursor.execute("CREATE DATABASE test keep 36500")
cursor.execute("USE test")
cursor.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")


@ -4,7 +4,7 @@ conn = taos.connect()
cursor = conn.cursor()
cursor.execute("DROP DATABASE IF EXISTS test", req_id=1)
cursor.execute("CREATE DATABASE test", req_id=2)
cursor.execute("CREATE DATABASE test keep 36500", req_id=2)
cursor.execute("USE test", req_id=3)
cursor.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)", req_id=4)


@ -160,7 +160,7 @@ def main(infinity):
conn = get_connection()
conn.execute("DROP DATABASE IF EXISTS test")
conn.execute("CREATE DATABASE IF NOT EXISTS test")
conn.execute("CREATE DATABASE IF NOT EXISTS test keep 36500")
conn.execute("CREATE STABLE IF NOT EXISTS test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
"TAGS (location BINARY(64), groupId INT)")
conn.close()


@ -16,7 +16,7 @@ def get_connection():
def create_database(conn):
conn.execute("CREATE DATABASE test")
conn.execute("CREATE DATABASE test keep 36500")
conn.execute("USE test")


@ -5,7 +5,7 @@ LOCATIONS = ['California.SanFrancisco', 'California.LosAngles', 'California.SanD
'California.PaloAlto', 'California.Campbell', 'California.MountainView', 'California.Sunnyvale',
'California.SantaClara', 'California.Cupertino']
CREATE_DATABASE_SQL = 'create database if not exists {} keep 365 duration 10 buffer 16 wal_level 1 wal_retention_period 3600'
CREATE_DATABASE_SQL = 'create database if not exists {} keep 36500 duration 10 buffer 16 wal_level 1 wal_retention_period 3600'
USE_DATABASE_SQL = 'use {}'
DROP_TABLE_SQL = 'drop table if exists meters'
DROP_DATABASE_SQL = 'drop database if exists {}'


@ -15,7 +15,7 @@ def get_connection():
def create_database(conn):
# the default precision is ms (millisecond), but we use us (microsecond) here.
conn.execute("CREATE DATABASE test precision 'us'")
conn.execute("CREATE DATABASE test precision 'us' keep 36500")
conn.execute("USE test")


@ -71,7 +71,7 @@ def insert_data():
def create_stable():
conn = taos.connect()
try:
conn.execute("CREATE DATABASE power")
conn.execute("CREATE DATABASE power keep 36500")
conn.execute("CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
"TAGS (location BINARY(64), groupId INT)")
finally:


@ -18,7 +18,7 @@ def get_connection() -> taos.TaosConnection:
def create_stable(conn: taos.TaosConnection):
conn.execute("CREATE DATABASE power")
conn.execute("CREATE DATABASE power keep 36500")
conn.execute("USE power")
conn.execute("CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
"TAGS (location BINARY(64), groupId INT)")


@ -2,7 +2,7 @@ import taos
conn = taos.connect()
conn.execute("DROP DATABASE IF EXISTS test")
conn.execute("CREATE DATABASE test")
conn.execute("CREATE DATABASE test keep 36500")
conn.select_db("test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)")
# prepare data


@ -2,7 +2,7 @@ import taos
conn = taos.connect()
conn.execute("DROP DATABASE IF EXISTS test", req_id=1)
conn.execute("CREATE DATABASE test", req_id=2)
conn.execute("CREATE DATABASE test keep 36500", req_id=2)
conn.select_db("test")
conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (location INT)", req_id=3)
# prepare data


@ -3,7 +3,7 @@ import taos
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.execute("create database if not exists %s precision 'us' keep 36500" % dbname)
conn.select_db(dbname)
lines = [


@ -10,9 +10,9 @@ try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
conn.execute("create database if not exists %s schemaless 1 precision 'ns' keep 36500" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.execute("create database if not exists %s update 2 precision 'ns' keep 36500" % dbname)
conn.select_db(dbname)


@ -10,9 +10,9 @@ try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
conn.execute("create database if not exists %s schemaless 1 precision 'ns' keep 36500" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.execute("create database if not exists %s update 2 precision 'ns' keep 36500" % dbname)
conn.select_db(dbname)


@ -10,9 +10,9 @@ try:
conn.execute("drop database if exists %s" % dbname)
if taos.IS_V3:
conn.execute("create database if not exists %s schemaless 1 precision 'ns'" % dbname)
conn.execute("create database if not exists %s schemaless 1 precision 'ns' keep 36500" % dbname)
else:
conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.execute("create database if not exists %s update 2 precision 'ns' keep 36500" % dbname)
conn.select_db(dbname)


@ -4,7 +4,7 @@ from taos import SmlProtocol, SmlPrecision
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.execute("create database if not exists %s precision 'us' keep 36500" % dbname)
conn.select_db(dbname)
lines = [


@ -4,7 +4,7 @@ from taos import SmlProtocol, SmlPrecision
conn = taos.connect()
dbname = "pytest_line"
conn.execute("drop database if exists %s" % dbname)
conn.execute("create database if not exists %s precision 'us'" % dbname)
conn.execute("create database if not exists %s precision 'us' keep 36500" % dbname)
conn.select_db(dbname)
lines = [


@ -10,7 +10,7 @@ class SQLWriter:
self._tb_tags = {}
self._conn = get_connection_func()
self._max_sql_length = self.get_max_sql_length()
self._conn.execute("create database if not exists test")
self._conn.execute("create database if not exists test keep 36500")
self._conn.execute("USE test")
def get_max_sql_length(self):


@ -10,7 +10,7 @@ db_name = 'test_ws_stmt'
def before():
taos_conn = taos.connect()
taos_conn.execute("drop database if exists %s" % db_name)
taos_conn.execute("create database %s" % db_name)
taos_conn.execute("create database %s keep 36500" % db_name)
taos_conn.select_db(db_name)
taos_conn.execute("create table t1 (ts timestamp, a int, b float, c varchar(10))")
taos_conn.execute(


@ -9,7 +9,7 @@ import taos
def before_test(db_name):
taos_conn = taos.connect()
taos_conn.execute("drop database if exists %s" % db_name)
taos_conn.execute("create database %s" % db_name)
taos_conn.execute("create database %s keep 36500" % db_name)
taos_conn.select_db(db_name)
taos_conn.execute("create table t1 (ts timestamp, a int, b float, c varchar(10))")
taos_conn.execute(


@ -19,7 +19,7 @@ def get_connection():
def create_database(conn):
conn.execute("CREATE DATABASE test")
conn.execute("CREATE DATABASE test keep 36500")
conn.execute("USE test")


@ -7,7 +7,7 @@ def prepare():
conn = taos.connect()
conn.execute("drop topic if exists tmq_assignment_demo_topic")
conn.execute("drop database if exists tmq_assignment_demo_db")
conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600 keep 36500")
conn.select_db("tmq_assignment_demo_db")
conn.execute(
"create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")


@ -6,7 +6,7 @@ def init_tmq_env(db, topic):
conn = taos.connect()
conn.execute("drop topic if exists {}".format(topic))
conn.execute("drop database if exists {}".format(db))
conn.execute("create database if not exists {} wal_retention_period 3600".format(db))
conn.execute("create database if not exists {} wal_retention_period 3600 keep 36500".format(db))
conn.select_db(db)
conn.execute(
"create stable if not exists stb1 (ts timestamp, c1 int, c2 float, c3 varchar(16)) tags(t1 int, t3 varchar(16))")


@ -6,7 +6,7 @@ def prepare():
conn = taos.connect()
conn.execute("drop topic if exists tmq_assignment_demo_topic")
conn.execute("drop database if exists tmq_assignment_demo_db")
conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600")
conn.execute("create database if not exists tmq_assignment_demo_db wal_retention_period 3600 keep 36500")
conn.select_db("tmq_assignment_demo_db")
conn.execute(
"create table if not exists tmq_assignment_demo_table (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)")


@ -26,7 +26,6 @@ database_option: {
| PAGESIZE value
| PRECISION {'ms' | 'us' | 'ns'}
| REPLICA value
| RETENTIONS ingestion_duration:keep_duration ...
| WAL_LEVEL {1 | 2}
| VGROUPS value
| SINGLE_STABLE {0 | 1}
@ -61,7 +60,6 @@ database_option: {
- PAGESIZE: the page size of the metadata storage engine in a vnode, in KB. The default is 4 KB; the valid range is 1 to 16384 (1 KB to 16 MB).
- PRECISION: the timestamp precision of the database. ms means milliseconds, us means microseconds, ns means nanoseconds; the default is ms.
- REPLICA: the number of database replicas, either 1 or 3, defaulting to 1. In a cluster, the number of replicas must be less than or equal to the number of dnodes.
- RETENTIONS: the aggregation periods and retention durations of the data. For example, RETENTIONS 15s:7d,1m:21d,15m:50d means raw data is collected every 15 seconds and kept for 7 days, data aggregated per 1 minute is kept for 21 days, and data aggregated per 15 minutes is kept for 50 days. Exactly three retention levels are currently supported.
- WAL_LEVEL: the WAL level, defaulting to 1.
- 1: write the WAL but do not execute fsync.
- 2: write the WAL and execute fsync.
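
For illustration, here is a hedged Python sketch combining several of the options above into a single CREATE DATABASE statement; the database name and option values are made up for the example, not recommendations.

```python
import taos

conn = taos.connect()
# precision, replica, keep, and wal_level are among the options documented above
conn.execute(
    "create database if not exists demo "
    "precision 'us' replica 1 keep 36500 wal_level 1"
)
conn.close()
```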


@ -30,9 +30,6 @@ table_options:
table_option: {
COMMENT 'string_value'
| WATERMARK duration[,duration]
| MAX_DELAY duration[,duration]
| ROLLUP(func_name [, func_name] ...)
| SMA(col_name [, col_name] ...)
| TTL value
}
@ -52,11 +49,8 @@ table_option: {
**Parameter description**
1. COMMENT: the table comment. Applicable to supertables, subtables, and normal tables.
2. WATERMARK: specifies the closing time of a window. The default is 5 seconds, the minimum unit is milliseconds, and the range is 0 to 15 minutes; multiple values are separated by commas. Only applicable to supertables, and only when the database uses the RETENTIONS parameter.
3. MAX_DELAY: controls the maximum delay before pushing computation results. The default is the interval value (without exceeding the maximum), the minimum unit is milliseconds, and the range is 1 millisecond to 15 minutes; multiple values are separated by commas. Note: do not set MAX_DELAY too small, or results will be pushed too frequently and degrade storage and query performance; use the default unless there is a special need. Only applicable to supertables, and only when the database uses the RETENTIONS parameter.
4. ROLLUP: the aggregate function used for rollup, providing down-sampled aggregation results across multiple levels. Only applicable to supertables, and only when the database uses the RETENTIONS parameter. It applies to all columns of the supertable except the TS column, and only one aggregate function can be defined. Supported aggregate functions: avg, sum, min, max, last, first.
5. SMA: Small Materialized Aggregates, custom pre-computation based on data blocks. Pre-computation types include MAX, MIN, and SUM. Applicable to supertables and normal tables.
6. TTL: Time to Live, the parameter that specifies the lifetime of a table. If it is set when a table is created, TDengine automatically deletes the table once it has existed longer than the TTL. The TTL time is approximate: the system does not guarantee deletion at exactly that time, only that the mechanism exists and the table will eventually be deleted. TTL is measured in days and defaults to 0 (no limit); the expiry time is the table creation time plus the TTL. TTL is unrelated to the database KEEP parameter: if KEEP is smaller than TTL, the data may already be deleted before the table is dropped.
2. SMA: Small Materialized Aggregates, custom pre-computation based on data blocks. Pre-computation types include MAX, MIN, and SUM. Applicable to supertables and normal tables.
3. TTL: Time to Live, the parameter that specifies the lifetime of a table. If it is set when a table is created, TDengine automatically deletes the table once it has existed longer than the TTL. The TTL time is approximate: the system does not guarantee deletion at exactly that time, only that the mechanism exists and the table will eventually be deleted. TTL is measured in days and defaults to 0 (no limit); the expiry time is the table creation time plus the TTL. TTL is unrelated to the database KEEP parameter: if KEEP is smaller than TTL, the data may already be deleted before the table is dropped.
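
A hedged Python sketch of the TTL parameter just described (the database and table names are illustrative); as the note says, deletion after expiry is eventual, not punctual.

```python
import taos

conn = taos.connect()
conn.execute("create database if not exists demo keep 36500")
conn.execute("use demo")
# the table becomes eligible for automatic deletion about 7 days after creation
conn.execute("create table if not exists t1 (ts timestamp, v int) ttl 7")
conn.close()
```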
## Create Subtables


@ -43,6 +43,7 @@ int32_t s3GetObjectBlock(const char *object_name, int64_t offset, int64_t size,
int32_t s3GetObjectsByPrefix(const char *prefix, const char *path);
void s3EvictCache(const char *path, long object_size);
long s3Size(const char *object_name);
int32_t s3GetObjectToFile(const char *object_name, char *fileName);
#ifdef __cplusplus
}


@ -258,8 +258,6 @@ typedef struct SQueryTableDataCond {
int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock);
void* tDecodeDataBlock(const void* buf, SSDataBlock* pBlock);
int32_t tEncodeDataBlocks(void** buf, const SArray* blocks);
void* tDecodeDataBlocks(const void* buf, SArray** blocks);
void colDataDestroy(SColumnInfoData* pColData);
//======================================================================================================================
@ -294,7 +292,7 @@ typedef struct STableBlockDistInfo {
int32_t defMaxRows;
int32_t firstSeekTimeUs;
uint32_t numOfInmemRows;
uint32_t numOfSmallBlocks;
uint32_t numOfSttRows;
uint32_t numOfVgroups;
int32_t blockRowsHisto[20];
} STableBlockDistInfo;


@ -75,12 +75,15 @@ extern int32_t tsElectInterval;
extern int32_t tsHeartbeatInterval;
extern int32_t tsHeartbeatTimeout;
// vnode
extern int64_t tsVndCommitMaxIntervalMs;
// snode
extern int32_t tsRsyncPort;
extern char tsCheckpointBackupDir[];
extern char tsCheckpointBackupDir[];
// vnode checkpoint
extern char tsSnodeAddress[]; //127.0.0.1:873
extern char tsSnodeAddress[]; // 127.0.0.1:873
// mnode
extern int64_t tsMndSdbWriteDelta;
@ -104,8 +107,8 @@ extern int32_t tsMonitorMaxLogs;
extern bool tsMonitorComp;
// audit
extern bool tsEnableAudit;
extern bool tsEnableAuditCreateTable;
extern bool tsEnableAudit;
extern bool tsEnableAuditCreateTable;
// telem
extern bool tsEnableTelem;
@ -113,9 +116,9 @@ extern int32_t tsTelemInterval;
extern char tsTelemServer[];
extern uint16_t tsTelemPort;
extern bool tsEnableCrashReport;
extern char *tsTelemUri;
extern char *tsClientCrashReportUri;
extern char *tsSvrCrashReportUri;
extern char * tsTelemUri;
extern char * tsClientCrashReportUri;
extern char * tsSvrCrashReportUri;
// query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing


@ -31,8 +31,6 @@ extern "C" {
#endif
#define GRANT_HEART_BEAT_MIN 2
#define GRANT_ACTIVE_CODE "activeCode"
#define GRANT_C_ACTIVE_CODE "cActiveCode"
typedef enum {
TSDB_GRANT_ALL,
@ -52,11 +50,6 @@ typedef enum {
TSDB_GRANT_TABLE,
} EGrantType;
typedef struct {
int64_t grantedTime;
int64_t connGrantedTime;
} SGrantedInfo;
int32_t grantCheck(EGrantType grant);
int32_t grantAlterActiveCode(int32_t did, const char* old, const char* newer, char* out, int8_t type);


@ -35,12 +35,14 @@ extern "C" {
#define TD_MSG_NUMBER_
#undef TD_MSG_DICT_
#undef TD_MSG_INFO_
#undef TD_MSG_RANGE_CODE_
#undef TD_MSG_SEG_CODE_
#include "tmsgdef.h"
#undef TD_MSG_NUMBER_
#undef TD_MSG_DICT_
#undef TD_MSG_INFO_
#undef TD_MSG_RANGE_CODE_
#define TD_MSG_SEG_CODE_
#include "tmsgdef.h"
@ -48,33 +50,31 @@ extern "C" {
#undef TD_MSG_DICT_
#undef TD_MSG_INFO_
#undef TD_MSG_SEG_CODE_
#undef TD_MSG_RANGE_CODE_
#include "tmsgdef.h"
extern char* tMsgInfo[];
extern int32_t tMsgDict[];
#define TMSG_SEG_CODE(TYPE) (((TYPE)&0xff00) >> 8)
#define TMSG_SEG_SEQ(TYPE) ((TYPE)&0xff)
#define TMSG_INFO(TYPE) \
((TYPE) < TDMT_DND_MAX_MSG || (TYPE) < TDMT_MND_MAX_MSG || (TYPE) < TDMT_VND_MAX_MSG || (TYPE) < TDMT_SCH_MAX_MSG || \
(TYPE) < TDMT_STREAM_MAX_MSG || (TYPE) < TDMT_MON_MAX_MSG || (TYPE) < TDMT_SYNC_MAX_MSG) || \
(TYPE) < TDMT_VND_STREAM_MSG || (TYPE) < TDMT_VND_TMQ_MSG || (TYPE) < TDMT_VND_TMQ_MAX_MSG \
? tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)] \
: 0
#define TMSG_INDEX(TYPE) (tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE))
extern int32_t tMsgRangeDict[];
typedef uint16_t tmsg_t;
#define TMSG_SEG_CODE(TYPE) (((TYPE)&0xff00) >> 8)
#define TMSG_SEG_SEQ(TYPE) ((TYPE)&0xff)
#define TMSG_INDEX(TYPE) (tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE))
static inline bool tmsgIsValid(tmsg_t type) {
if (type < TDMT_DND_MAX_MSG || type < TDMT_MND_MAX_MSG || type < TDMT_VND_MAX_MSG || type < TDMT_SCH_MAX_MSG ||
type < TDMT_STREAM_MAX_MSG || type < TDMT_MON_MAX_MSG || type < TDMT_SYNC_MAX_MSG || type < TDMT_VND_STREAM_MSG ||
type < TDMT_VND_TMQ_MSG || type < TDMT_VND_TMQ_MAX_MSG) {
return true;
} else {
return false;
// static int8_t sz = sizeof(tMsgRangeDict) / sizeof(tMsgRangeDict[0]);
int8_t maxSegIdx = TMSG_SEG_CODE(TDMT_MAX_MSG);
int segIdx = TMSG_SEG_CODE(type);
if (segIdx >= 0 && segIdx < maxSegIdx) {
return type < tMsgRangeDict[segIdx];
}
return false;
}
#define TMSG_INFO(type) (tmsgIsValid(type) ? tMsgInfo[TMSG_INDEX(type)] : "unKnown")
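
For readers following the macro changes above, here is a hedged Python sketch (with made-up example range values) of the encoding these macros imply: the high byte of a tmsg_t is the segment code, the low byte is the sequence within the segment, and tMsgRangeDict holds each segment's closing TDMT_END_*_MSG value, so validity reduces to one table lookup.

```python
def seg_code(msg_type: int) -> int:
    # high byte selects the message segment (TMSG_SEG_CODE)
    return (msg_type & 0xFF00) >> 8

def seg_seq(msg_type: int) -> int:
    # low byte is the sequence within the segment (TMSG_SEG_SEQ)
    return msg_type & 0xFF

# hypothetical closing markers for two segments (real values come from tmsgdef.h)
T_MSG_RANGE_DICT = [0x0010, 0x0170]

def tmsg_is_valid(msg_type: int) -> bool:
    idx = seg_code(msg_type)
    if 0 <= idx < len(T_MSG_RANGE_DICT):
        # a type is valid iff it precedes its segment's closing marker
        return msg_type < T_MSG_RANGE_DICT[idx]
    return False
```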
static inline bool vnodeIsMsgBlock(tmsg_t type) {
return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) ||
(type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM) || (type == TDMT_VND_COMMIT) ||
@ -169,14 +169,14 @@ typedef enum _mgmt_table {
#define TSDB_FILL_PREV 6
#define TSDB_FILL_NEXT 7
#define TSDB_ALTER_USER_PASSWD 0x1
#define TSDB_ALTER_USER_SUPERUSER 0x2
#define TSDB_ALTER_USER_ENABLE 0x3
#define TSDB_ALTER_USER_SYSINFO 0x4
#define TSDB_ALTER_USER_ADD_PRIVILEGES 0x5
#define TSDB_ALTER_USER_DEL_PRIVILEGES 0x6
#define TSDB_ALTER_USER_ADD_WHITE_LIST 0x7
#define TSDB_ALTER_USER_DROP_WHITE_LIST 0x8
#define TSDB_ALTER_USER_PASSWD 0x1
#define TSDB_ALTER_USER_SUPERUSER 0x2
#define TSDB_ALTER_USER_ENABLE 0x3
#define TSDB_ALTER_USER_SYSINFO 0x4
#define TSDB_ALTER_USER_ADD_PRIVILEGES 0x5
#define TSDB_ALTER_USER_DEL_PRIVILEGES 0x6
#define TSDB_ALTER_USER_ADD_WHITE_LIST 0x7
#define TSDB_ALTER_USER_DROP_WHITE_LIST 0x8
#define TSDB_KILL_MSG_LEN 30
@ -295,7 +295,36 @@ typedef enum ENodeType {
QUERY_NODE_SYNCDB_STMT,
QUERY_NODE_GRANT_STMT,
QUERY_NODE_REVOKE_STMT,
QUERY_NODE_SHOW_DNODES_STMT,
// placeholder for [152, 180]
QUERY_NODE_SHOW_CREATE_VIEW_STMT = 181,
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
QUERY_NODE_SHOW_CREATE_STABLE_STMT,
QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
QUERY_NODE_SHOW_SCORES_STMT,
QUERY_NODE_SHOW_TABLE_TAGS_STMT,
QUERY_NODE_KILL_CONNECTION_STMT,
QUERY_NODE_KILL_QUERY_STMT,
QUERY_NODE_KILL_TRANSACTION_STMT,
QUERY_NODE_DELETE_STMT,
QUERY_NODE_INSERT_STMT,
QUERY_NODE_QUERY,
QUERY_NODE_SHOW_DB_ALIVE_STMT,
QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT,
QUERY_NODE_BALANCE_VGROUP_LEADER_STMT,
QUERY_NODE_RESTORE_DNODE_STMT,
QUERY_NODE_RESTORE_QNODE_STMT,
QUERY_NODE_RESTORE_MNODE_STMT,
QUERY_NODE_RESTORE_VNODE_STMT,
QUERY_NODE_PAUSE_STREAM_STMT,
QUERY_NODE_RESUME_STREAM_STMT,
QUERY_NODE_CREATE_VIEW_STMT,
QUERY_NODE_DROP_VIEW_STMT,
// show statement nodes
// see 'sysTableShowAdapter', 'SYSTABLE_SHOW_TYPE_OFFSET'
QUERY_NODE_SHOW_DNODES_STMT = 400,
QUERY_NODE_SHOW_MNODES_STMT,
QUERY_NODE_SHOW_MODULES_STMT,
QUERY_NODE_SHOW_QNODES_STMT,
@ -324,31 +353,6 @@ typedef enum ENodeType {
QUERY_NODE_SHOW_VNODES_STMT,
QUERY_NODE_SHOW_USER_PRIVILEGES_STMT,
QUERY_NODE_SHOW_VIEWS_STMT,
QUERY_NODE_SHOW_CREATE_VIEW_STMT,
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
QUERY_NODE_SHOW_CREATE_STABLE_STMT,
QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
QUERY_NODE_SHOW_SCORES_STMT,
QUERY_NODE_SHOW_TABLE_TAGS_STMT,
QUERY_NODE_KILL_CONNECTION_STMT,
QUERY_NODE_KILL_QUERY_STMT,
QUERY_NODE_KILL_TRANSACTION_STMT,
QUERY_NODE_DELETE_STMT,
QUERY_NODE_INSERT_STMT,
QUERY_NODE_QUERY,
QUERY_NODE_SHOW_DB_ALIVE_STMT,
QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT,
QUERY_NODE_BALANCE_VGROUP_LEADER_STMT,
QUERY_NODE_RESTORE_DNODE_STMT,
QUERY_NODE_RESTORE_QNODE_STMT,
QUERY_NODE_RESTORE_MNODE_STMT,
QUERY_NODE_RESTORE_VNODE_STMT,
QUERY_NODE_PAUSE_STREAM_STMT,
QUERY_NODE_RESUME_STREAM_STMT,
QUERY_NODE_CREATE_VIEW_STMT,
QUERY_NODE_DROP_VIEW_STMT,
// logic plan node
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
@ -791,7 +795,7 @@ typedef struct {
int32_t tSerializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
int32_t tDeserializeSMDropStbReq(void* buf, int32_t bufLen, SMDropStbReq* pReq);
void tFreeSMDropStbReq(SMDropStbReq *pReq);
void tFreeSMDropStbReq(SMDropStbReq* pReq);
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
@ -872,18 +876,18 @@ int32_t tSerializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pReq
int32_t tDeserializeSCreateAcctReq(void* buf, int32_t bufLen, SCreateAcctReq* pReq);
typedef struct {
char user[TSDB_USER_LEN];
char user[TSDB_USER_LEN];
int32_t sqlLen;
char* sql;
} SDropUserReq, SDropAcctReq;
int32_t tSerializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
int32_t tDeserializeSDropUserReq(void* buf, int32_t bufLen, SDropUserReq* pReq);
void tFreeSDropUserReq(SDropUserReq *pReq);
void tFreeSDropUserReq(SDropUserReq* pReq);
typedef struct SIpV4Range{
uint32_t ip;
uint32_t mask;
typedef struct SIpV4Range {
uint32_t ip;
uint32_t mask;
} SIpV4Range;
typedef struct {
@ -893,21 +897,21 @@ typedef struct {
SIpWhiteList* cloneIpWhiteList(SIpWhiteList* pIpWhiteList);
typedef struct {
int8_t createType;
int8_t superUser; // denote if it is a super user or not
int8_t sysInfo;
int8_t enable;
char user[TSDB_USER_LEN];
char pass[TSDB_USET_PASSWORD_LEN];
int8_t createType;
int8_t superUser; // denote if it is a super user or not
int8_t sysInfo;
int8_t enable;
char user[TSDB_USER_LEN];
char pass[TSDB_USET_PASSWORD_LEN];
int32_t numIpRanges;
SIpV4Range* pIpRanges;
int32_t sqlLen;
char* sql;
int32_t sqlLen;
char* sql;
} SCreateUserReq;
int32_t tSerializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
int32_t tDeserializeSCreateUserReq(void* buf, int32_t bufLen, SCreateUserReq* pReq);
void tFreeSCreateUserReq(SCreateUserReq *pReq);
void tFreeSCreateUserReq(SCreateUserReq* pReq);
typedef struct {
int64_t ver;
@ -934,22 +938,22 @@ int32_t tSerializeRetrieveIpWhite(void* buf, int32_t bufLen, SRetrieveIpWhiteReq
int32_t tDeserializeRetrieveIpWhite(void* buf, int32_t bufLen, SRetrieveIpWhiteReq* pReq);
typedef struct {
int8_t alterType;
int8_t superUser;
int8_t sysInfo;
int8_t enable;
int8_t isView;
char user[TSDB_USER_LEN];
char pass[TSDB_USET_PASSWORD_LEN];
char objname[TSDB_DB_FNAME_LEN]; // db or topic
char tabName[TSDB_TABLE_NAME_LEN];
char* tagCond;
int32_t tagCondLen;
int8_t alterType;
int8_t superUser;
int8_t sysInfo;
int8_t enable;
int8_t isView;
char user[TSDB_USER_LEN];
char pass[TSDB_USET_PASSWORD_LEN];
char objname[TSDB_DB_FNAME_LEN]; // db or topic
char tabName[TSDB_TABLE_NAME_LEN];
char* tagCond;
int32_t tagCondLen;
int32_t numIpRanges;
SIpV4Range* pIpRanges;
int64_t privileges;
int32_t sqlLen;
char* sql;
int32_t sqlLen;
char* sql;
} SAlterUserReq;
int32_t tSerializeSAlterUserReq(void* buf, int32_t bufLen, SAlterUserReq* pReq);
@ -979,9 +983,9 @@ typedef struct {
SHashObj* alterTbs;
SHashObj* readViews;
SHashObj* writeViews;
SHashObj* alterViews;
SHashObj* alterViews;
SHashObj* useDbs;
int64_t whiteListVer;
int64_t whiteListVer;
} SGetUserAuthRsp;
int32_t tSerializeSGetUserAuthRsp(void* buf, int32_t bufLen, SGetUserAuthRsp* pRsp);
@ -996,8 +1000,8 @@ int32_t tSerializeSGetUserWhiteListReq(void* buf, int32_t bufLen, SGetUserWhiteL
int32_t tDeserializeSGetUserWhiteListReq(void* buf, int32_t bufLen, SGetUserWhiteListReq* pReq);
typedef struct {
char user[TSDB_USER_LEN];
int32_t numWhiteLists;
char user[TSDB_USER_LEN];
int32_t numWhiteLists;
SIpV4Range* pWhiteLists;
} SGetUserWhiteListRsp;
@ -1170,8 +1174,8 @@ int32_t tDeserializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
void tFreeSAlterDbReq(SAlterDbReq* pReq);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
int8_t ignoreNotExists;
char db[TSDB_DB_FNAME_LEN];
int8_t ignoreNotExists;
int32_t sqlLen;
char* sql;
} SDropDbReq;
@ -1379,7 +1383,7 @@ typedef struct {
int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
int32_t tDeserializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
void tFreeSCompactDbReq(SCompactDbReq *pReq);
void tFreeSCompactDbReq(SCompactDbReq* pReq);
typedef struct {
char name[TSDB_FUNC_NAME_LEN];
@ -1818,7 +1822,6 @@ int32_t tSerializeSViewHbRsp(void* buf, int32_t bufLen, SViewHbRsp* pRsp);
int32_t tDeserializeSViewHbRsp(void* buf, int32_t bufLen, SViewHbRsp* pRsp);
void tFreeSViewHbRsp(SViewHbRsp* pRsp);
typedef struct {
int32_t numOfTables;
int32_t numOfVgroup;
@ -2007,7 +2010,7 @@ typedef struct {
int32_t tSerializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
int32_t tDeserializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
void tFreeSRestoreDnodeReq(SRestoreDnodeReq *pReq);
void tFreeSRestoreDnodeReq(SRestoreDnodeReq* pReq);
typedef struct {
int32_t dnodeId;
@ -2019,7 +2022,7 @@ typedef struct {
int32_t tSerializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq);
int32_t tDeserializeSMCfgDnodeReq(void* buf, int32_t bufLen, SMCfgDnodeReq* pReq);
void tFreeSMCfgDnodeReq(SMCfgDnodeReq *pReq);
void tFreeSMCfgDnodeReq(SMCfgDnodeReq* pReq);
typedef struct {
char config[TSDB_DNODE_CONFIG_LEN];
@ -2038,7 +2041,7 @@ typedef struct {
int32_t tSerializeSCreateDropMQSNodeReq(void* buf, int32_t bufLen, SMCreateQnodeReq* pReq);
int32_t tDeserializeSCreateDropMQSNodeReq(void* buf, int32_t bufLen, SMCreateQnodeReq* pReq);
void tFreeSMCreateQnodeReq(SMCreateQnodeReq *pReq);
void tFreeSMCreateQnodeReq(SMCreateQnodeReq* pReq);
void tFreeSDDropQnodeReq(SDDropQnodeReq* pReq);
typedef struct {
int8_t replica;
@ -2080,7 +2083,7 @@ typedef struct {
int32_t tSerializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq* pReq);
int32_t tDeserializeSBalanceVgroupReq(void* buf, int32_t bufLen, SBalanceVgroupReq* pReq);
void tFreeSBalanceVgroupReq(SBalanceVgroupReq *pReq);
void tFreeSBalanceVgroupReq(SBalanceVgroupReq* pReq);
typedef struct {
int32_t vgId1;
@ -2101,7 +2104,7 @@ typedef struct {
int32_t tSerializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistributeVgroupReq* pReq);
int32_t tDeserializeSRedistributeVgroupReq(void* buf, int32_t bufLen, SRedistributeVgroupReq* pReq);
void tFreeSRedistributeVgroupReq(SRedistributeVgroupReq *pReq);
void tFreeSRedistributeVgroupReq(SRedistributeVgroupReq* pReq);
typedef struct {
int32_t useless;
@ -2112,7 +2115,7 @@ typedef struct {
int32_t tSerializeSBalanceVgroupLeaderReq(void* buf, int32_t bufLen, SBalanceVgroupLeaderReq* pReq);
int32_t tDeserializeSBalanceVgroupLeaderReq(void* buf, int32_t bufLen, SBalanceVgroupLeaderReq* pReq);
void tFreeSBalanceVgroupLeaderReq(SBalanceVgroupLeaderReq *pReq);
void tFreeSBalanceVgroupLeaderReq(SBalanceVgroupLeaderReq* pReq);
typedef struct {
int32_t vgId;
@ -2504,15 +2507,15 @@ typedef struct {
} SMVSubscribeRsp;
typedef struct {
char name[TSDB_TOPIC_FNAME_LEN];
int8_t igNotExists;
char name[TSDB_TOPIC_FNAME_LEN];
int8_t igNotExists;
int32_t sqlLen;
char* sql;
} SMDropTopicReq;
int32_t tSerializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
int32_t tDeserializeSMDropTopicReq(void* buf, int32_t bufLen, SMDropTopicReq* pReq);
void tFreeSMDropTopicReq(SMDropTopicReq *pReq);
void tFreeSMDropTopicReq(SMDropTopicReq* pReq);
typedef struct {
char topic[TSDB_TOPIC_FNAME_LEN];
@ -3083,8 +3086,8 @@ typedef struct {
} SMqVDeleteRsp;
typedef struct {
char name[TSDB_STREAM_FNAME_LEN];
int8_t igNotExists;
char name[TSDB_STREAM_FNAME_LEN];
int8_t igNotExists;
int32_t sqlLen;
char* sql;
} SMDropStreamReq;
@ -3921,7 +3924,7 @@ int32_t tDeserializeSCMDropViewReq(void* buf, int32_t bufLen, SCMDropViewReq* pR
void tFreeSCMDropViewReq(SCMDropViewReq* pReq);
typedef struct {
char fullname[TSDB_VIEW_FNAME_LEN];
char fullname[TSDB_VIEW_FNAME_LEN];
} SViewMetaReq;
int32_t tSerializeSViewMetaReq(void* buf, int32_t bufLen, const SViewMetaReq* pReq);
int32_t tDeserializeSViewMetaReq(void* buf, int32_t bufLen, SViewMetaReq* pReq);
@ -3943,7 +3946,6 @@ int32_t tSerializeSViewMetaRsp(void* buf, int32_t bufLen, const SViewMetaRsp* pR
int32_t tDeserializeSViewMetaRsp(void* buf, int32_t bufLen, SViewMetaRsp* pRsp);
void tFreeSViewMetaRsp(SViewMetaRsp* pRsp);
#pragma pack(pop)
#ifdef __cplusplus


@ -24,48 +24,70 @@
#if defined(TD_MSG_INFO_)
#undef TD_NEW_MSG_SEG
#undef TD_DEF_MSG_TYPE
#define TD_NEW_MSG_SEG(TYPE) "null",
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) MSG, MSG "-rsp",
#undef TD_NEW_MSG_SEG
#undef TD_DEF_MSG_TYPE
#undef TD_CLOSE_MSG_TYPE
#define TD_NEW_MSG_SEG(TYPE) "null",
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) MSG, MSG "-rsp",
#define TD_CLOSE_MSG_TYPE(TYPE)
char *tMsgInfo[] = {
char *tMsgInfo[] = {
#elif defined(TD_MSG_RANGE_CODE_)
#undef TD_NEW_MSG_SEG
#undef TD_DEF_MSG_TYPE
#undef TD_CLOSE_MSG_TYPE
#define TD_NEW_MSG_SEG(TYPE)
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP)
#define TD_CLOSE_MSG_TYPE(TYPE) TYPE,
int32_t tMsgRangeDict[] = {
#elif defined(TD_MSG_NUMBER_)
#undef TD_NEW_MSG_SEG
#undef TD_DEF_MSG_TYPE
#define TD_NEW_MSG_SEG(TYPE) TYPE##_NUM,
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) TYPE##_NUM, TYPE##_RSP_NUM,
#undef TD_NEW_MSG_SEG
#undef TD_DEF_MSG_TYPE
#undef TD_CLOSE_MSG_TYPE
#define TD_NEW_MSG_SEG(TYPE) TYPE##_NUM,
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) TYPE##_NUM, TYPE##_RSP_NUM,
#define TD_CLOSE_MSG_TYPE(TYPE)
enum {
enum {
#elif defined(TD_MSG_DICT_)
#undef TD_NEW_MSG_SEG
#undef TD_DEF_MSG_TYPE
#define TD_NEW_MSG_SEG(TYPE) TYPE##_NUM,
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP)
#undef TD_NEW_MSG_SEG
#undef TD_DEF_MSG_TYPE
#undef TD_CLOSE_MSG_TYPE
#define TD_NEW_MSG_SEG(TYPE) TYPE##_NUM,
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP)
#define TD_CLOSE_MSG_TYPE(type)
int32_t tMsgDict[] = {
int32_t tMsgDict[] = {
#elif defined(TD_MSG_SEG_CODE_)
#undef TD_NEW_MSG_SEG
#undef TD_DEF_MSG_TYPE
#define TD_NEW_MSG_SEG(TYPE) TYPE##_SEG_CODE,
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP)
#undef TD_NEW_MSG_SEG
#undef TD_DEF_MSG_TYPE
#undef TD_CLOSE_MSG_TYPE
#define TD_NEW_MSG_SEG(TYPE) TYPE##_SEG_CODE,
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP)
#define TD_CLOSE_MSG_TYPE(TYPE)
enum {
enum {
#else
#undef TD_NEW_MSG_SEG
#undef TD_DEF_MSG_TYPE
#define TD_NEW_MSG_SEG(TYPE) TYPE = ((TYPE##_SEG_CODE) << 8),
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) TYPE, TYPE##_RSP,
#else
enum { // WARN: new msg should be appended to segment tail
#undef TD_NEW_MSG_SEG
#undef TD_DEF_MSG_TYPE
#undef TD_CLOSE_MSG_TYPE
#define TD_NEW_MSG_SEG(TYPE) TYPE = ((TYPE##_SEG_CODE) << 8),
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) TYPE, TYPE##_RSP,
#define TD_CLOSE_MSG_TYPE(TYPE) TYPE,
enum { // WARN: new msg should be appended to segment tail
#endif
TD_NEW_MSG_SEG(TDMT_DND_MSG) // 0<<8
TD_DEF_MSG_TYPE(TDMT_DND_CREATE_MNODE, "dnode-create-mnode", NULL, NULL)
@ -82,10 +104,12 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_DND_NET_TEST, "net-test", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_CONFIG_DNODE, "config-dnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_SYSTABLE_RETRIEVE, "dnode-retrieve", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_MAX_MSG, "dnd-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_UNUSED_CODE, "dnd-unused", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_ALTER_MNODE_TYPE, "dnode-alter-mnode-type", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_ALTER_VNODE_TYPE, "dnode-alter-vnode-type", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP, "dnode-check-vnode-learner-catchup", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_MAX_MSG, "dnd-max", NULL, NULL)
TD_CLOSE_MSG_TYPE(TDMT_END_DND_MSG)
TD_NEW_MSG_SEG(TDMT_MND_MSG) // 1<<8
TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL)
@ -186,6 +210,7 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_MND_RESUME_STREAM, "resume-stream", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_CANDIDITATE, "stream-checkpoint-remain", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_NODECHANGE_CHECK, "stream-nodechange-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TRIM_DB_TIMER, "trim-db-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GRANT_NOTIFY, "grant-notify", NULL, NULL)
@ -193,6 +218,7 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_MND_DROP_VIEW, "drop-view", SCMDropViewReq, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_VIEW_META, "view-meta", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
TD_CLOSE_MSG_TYPE(TDMT_END_MND_MSG)
TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
@ -230,7 +256,7 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_VND_EXEC_RSMA, "vnode-exec-rsma", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DELETE, "delete-data", SVDeleteReq, SVDeleteRsp)
TD_DEF_MSG_TYPE(TDMT_VND_BATCH_DEL, "batch-delete", SBatchDeleteReq, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIG, "alter-config", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIG, "alter-config", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_REPLICA, "alter-replica", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIRM, "alter-confirm", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_HASHRANGE, "alter-hashrange", NULL, NULL)
@ -242,6 +268,7 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_VND_DROP_INDEX, "vnode-drop-index", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DISABLE_WRITE, "vnode-disable-write", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_MAX_MSG, "vnd-max", NULL, NULL)
TD_CLOSE_MSG_TYPE(TDMT_END_VND_MSG)
TD_NEW_MSG_SEG(TDMT_SCH_MSG) // 3<<8
TD_DEF_MSG_TYPE(TDMT_SCH_QUERY, "query", NULL, NULL)
@ -256,6 +283,7 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_SCH_LINK_BROKEN, "link-broken", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SCH_TASK_NOTIFY, "task-notify", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SCH_MAX_MSG, "sch-max", NULL, NULL)
TD_CLOSE_MSG_TYPE(TDMT_END_SCH_MSG)
TD_NEW_MSG_SEG(TDMT_STREAM_MSG) //4 << 8
@ -273,9 +301,11 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_STOP, "stream-task-stop", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_HTASK_DROP, "stream-htask-drop", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)
TD_CLOSE_MSG_TYPE(TDMT_END_STREAM_MSG)
TD_NEW_MSG_SEG(TDMT_MON_MSG) //5 << 8
TD_DEF_MSG_TYPE(TDMT_MON_MAX_MSG, "monitor-max", NULL, NULL)
TD_CLOSE_MSG_TYPE(TDMT_END_MON_MSG)
TD_NEW_MSG_SEG(TDMT_SYNC_MSG) //6 << 8
TD_DEF_MSG_TYPE(TDMT_SYNC_TIMEOUT, "sync-timer", NULL, NULL)
@ -307,6 +337,8 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_SYNC_PREP_SNAPSHOT_REPLY, "sync-prep-snapshot-reply", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL)
TD_CLOSE_MSG_TYPE(TDMT_END_SYNC_MSG)
TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG) //7 << 8
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY, "vnode-stream-scan-history", NULL, NULL)
@ -316,6 +348,7 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_RESET, "vnode-stream-reset", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_CHECK, "vnode-stream-task-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_MAX_MSG, "vnd-stream-max", NULL, NULL)
TD_CLOSE_MSG_TYPE(TDMT_END_VND_STREAM_MSG)
TD_NEW_MSG_SEG(TDMT_VND_TMQ_MSG) //8 << 8
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_SUBSCRIBE, "vnode-tmq-subscribe", SMqRebVgReq, SMqRebVgRsp)
@ -329,9 +362,15 @@ enum { // WARN: new msg should be appended to segment tail
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_VG_WALINFO, "vnode-tmq-vg-walinfo", SMqPollReq, SMqDataBlkRsp)
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_VG_COMMITTEDINFO, "vnode-tmq-committedinfo", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TMQ_MAX_MSG, "vnd-tmq-max", NULL, NULL)
TD_CLOSE_MSG_TYPE(TDMT_END_TMQ_MSG)
TD_NEW_MSG_SEG(TDMT_MAX_MSG) // msg end mark
#if defined(TD_MSG_NUMBER_)
TDMT_MAX
#endif
#if defined(TD_MSG_NUMBER_)
TDMT_MAX
#endif
};


@ -275,9 +275,11 @@ typedef struct {
#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
#define IS_VALID_SMALLINT(_t) ((_t) >= INT16_MIN && (_t) <= INT16_MAX)
#define IS_VALID_INT(_t) ((_t) >= INT32_MIN && (_t) <= INT32_MAX)
#define IS_VALID_INT64(_t) ((_t) >= INT64_MIN && (_t) <= INT64_MAX)
#define IS_VALID_UTINYINT(_t) ((_t) >= 0 && (_t) <= UINT8_MAX)
#define IS_VALID_USMALLINT(_t) ((_t) >= 0 && (_t) <= UINT16_MAX)
#define IS_VALID_UINT(_t) ((_t) >= 0 && (_t) <= UINT32_MAX)
#define IS_VALID_UINT64(_t) ((_t) >= 0 && (_t) <= UINT64_MAX)
#define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX)
#define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX)


@ -37,6 +37,10 @@ typedef struct SVariant {
};
} SVariant;
int32_t toIntegerEx(const char *z, int32_t n, uint32_t type, int64_t *value);
int32_t toUIntegerEx(const char *z, int32_t n, uint32_t type, uint64_t *value);
int32_t toDoubleEx(const char *z, int32_t n, uint32_t type, double *value);
int32_t toInteger(const char *z, int32_t n, int32_t base, int64_t *value);
int32_t toUInteger(const char *z, int32_t n, int32_t base, uint64_t *value);


@ -152,6 +152,18 @@ typedef struct {
// clang-format off
/*-------------------------------------------------new api format---------------------------------------------------*/
typedef enum {
TSD_READER_NOTIFY_DURATION_START
} ETsdReaderNotifyType;
typedef union {
struct {
int32_t filesetId;
} duration;
} STsdReaderNotifyInfo;
typedef void (*TsdReaderNotifyCbFn)(ETsdReaderNotifyType type, STsdReaderNotifyInfo* info, void* param);
typedef struct TsdReader {
int32_t (*tsdReaderOpen)(void* pVnode, SQueryTableDataCond* pCond, void* pTableList, int32_t numOfTables,
SSDataBlock* pResBlock, void** ppReader, const char* idstr, bool countOnly,
@ -170,6 +182,9 @@ typedef struct TsdReader {
int32_t (*tsdReaderGetDataBlockDistInfo)();
int64_t (*tsdReaderGetNumOfInMemRows)();
void (*tsdReaderNotifyClosing)();
void (*tsdSetFilesetDelimited)(void* pReader);
void (*tsdSetSetNotifyCb)(void* pReader, TsdReaderNotifyCbFn notifyFn, void* param);
} TsdReader;
typedef struct SStoreCacheReader {
@ -351,6 +366,8 @@ typedef struct SStateStore {
TSKEY (*updateInfoFillBlockData)(SUpdateInfo* pInfo, SSDataBlock* pBlock, int32_t primaryTsCol);
bool (*updateInfoIsUpdated)(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts);
bool (*updateInfoIsTableInserted)(SUpdateInfo* pInfo, int64_t tbUid);
bool (*isIncrementalTimeStamp)(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);
void (*updateInfoDestroy)(SUpdateInfo* pInfo);
void (*windowSBfDelete)(SUpdateInfo *pInfo, uint64_t count);
void (*windowSBfAdd)(SUpdateInfo *pInfo, uint64_t count);


@ -118,6 +118,7 @@ typedef struct SScanLogicNode {
bool igLastNull;
bool groupOrderScan;
bool onlyMetaCtbIdx; // for tag scan with no tbname
bool filesetDelimited; // returned blocks delimited by fileset
} SScanLogicNode;
typedef struct SJoinLogicNode {
@ -433,6 +434,7 @@ typedef struct STableScanPhysiNode {
int8_t igExpired;
bool assignBlockUid;
int8_t igCheckUpdate;
bool filesetDelimited;
} STableScanPhysiNode;
typedef STableScanPhysiNode STableSeqScanPhysiNode;


@ -449,6 +449,7 @@ typedef struct SVnodeModifyOpStmt {
SHashObj* pSubTableHashObj; // SHashObj<table_name, STableMeta*>
SHashObj* pTableNameHashObj; // set of table names for refreshing meta, sync mode
SHashObj* pDbFNameHashObj; // set of db names for refreshing meta, sync mode
SHashObj* pTableCxtHashObj; // temp SHashObj<tuid, STableDataCxt*> for single request
SArray* pVgDataBlocks; // SArray<SVgroupDataCxt*>
SVCreateTbReq* pCreateTblReq;
TdFilePtr fp;


@ -35,6 +35,7 @@ int32_t streamStateBegin(SStreamState* pState);
int32_t streamStateCommit(SStreamState* pState);
void streamStateDestroy(SStreamState* pState, bool remove);
int32_t streamStateDeleteCheckPoint(SStreamState* pState, TSKEY mark);
int32_t streamStateDelTaskDb(SStreamState* pState);
int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen);
@ -133,4 +134,4 @@ char* streamStateIntervalDump(SStreamState* pState);
}
#endif
#endif /* ifndef _STREAM_STATE_H_ */
#endif /* ifndef _STREAM_STATE_H_ */


@ -58,7 +58,9 @@ typedef struct SStreamTask SStreamTask;
typedef struct SStreamQueue SStreamQueue;
typedef struct SStreamTaskSM SStreamTaskSM;
#define SSTREAM_TASK_VER 2
#define SSTREAM_TASK_VER 2
#define SSTREAM_TASK_INCOMPATIBLE_VER 1
#define SSTREAM_TASK_NEED_CONVERT_VER 2
enum {
STREAM_STATUS__NORMAL = 0,
@ -110,6 +112,7 @@ typedef enum {
TASK_LEVEL__SOURCE = 1,
TASK_LEVEL__AGG,
TASK_LEVEL__SINK,
TASK_LEVEL_SMA,
} ETASK_LEVEL;
enum {
@ -213,9 +216,6 @@ int32_t streamQueuePush(SStreamQueue1* pQueue, SStreamQueueItem* pItem);
SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue);
#endif
int32_t streamInit();
void streamCleanUp();
SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type);
void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit);
@ -304,11 +304,16 @@ typedef struct SStreamTaskId {
typedef struct SCheckpointInfo {
int64_t startTs;
int64_t checkpointId;
int64_t checkpointVer; // latest checkpointId version
int64_t processedVer; // already processed ver, that has generated results version.
int64_t checkpointVer; // latest checkpointId version
int64_t processedVer;
int64_t nextProcessVer; // current offset in WAL, not serialize it
int64_t failedId; // record the latest failed checkpoint id
int64_t checkpointingId;
int32_t downstreamAlignNum;
int32_t checkpointNotReadyTasks;
bool dispatchCheckpointTrigger;
int64_t msgVer;
} SCheckpointInfo;
typedef struct SStreamStatus {
@ -394,7 +399,8 @@ typedef struct SHistoryTaskInfo {
int32_t tickCount;
int32_t retryTimes;
int32_t waitInterval;
int64_t haltVer; // offset in wal when halt the stream task
int64_t haltVer; // offset in wal when halt the stream task
bool operatorOpen; // false by default
} SHistoryTaskInfo;
typedef struct STaskOutputInfo {
@ -447,12 +453,11 @@ struct SStreamTask {
int64_t checkReqId;
SArray* checkReqIds; // shuffle
int32_t refCnt;
int64_t checkpointingId;
int32_t checkpointAlignCnt;
int32_t checkpointNotReadyTasks;
int32_t transferStateAlignCnt;
struct SStreamMeta* pMeta;
SSHashObj* pNameMap;
void* pBackend;
int64_t backendRefId;
char reserve[256];
};
@ -490,20 +495,25 @@ typedef struct SStreamMeta {
int32_t walScanCounter;
void* streamBackend;
int64_t streamBackendRid;
SHashObj* pTaskBackendUnique;
SHashObj* pTaskDbUnique;
TdThreadMutex backendMutex;
SMetaHbInfo* pHbInfo;
STaskUpdateInfo updateInfo;
SHashObj* pUpdateTaskSet;
int32_t numOfStreamTasks; // this value should be increased when a new task is added into the meta
int32_t numOfPausedTasks;
int32_t chkptNotReadyTasks;
int64_t rid;
int64_t chkpId;
int32_t chkpCap;
SArray* chkpSaved;
SArray* chkpInUse;
int32_t chkpCap;
SRWLatch chkpDirLock;
void* qHandle;
int32_t pauseTaskNum;
void* bkdChkptMgt;
} SStreamMeta;
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
@ -659,7 +669,7 @@ int32_t tDecodeStreamCheckpointReadyMsg(SDecoder* pDecoder, SStreamCheckpointRea
typedef struct STaskStatusEntry {
STaskId id;
int32_t status;
int32_t statusLastDuration; // to record the last duration of current status
int32_t statusLastDuration; // to record the last duration of current status
int64_t stage;
int32_t nodeId;
int64_t verStart; // start version in WAL, only valid for source task
@ -668,10 +678,12 @@ typedef struct STaskStatusEntry {
int32_t relatedHTask; // has related fill-history task
int64_t activeCheckpointId; // current active checkpoint id
bool checkpointFailed; // denote if the checkpoint is failed or not
bool inputQChanging; // inputQ is changing or not
int64_t inputQUnchangeCounter;
double inputQUsed; // in MiB
double inputRate;
double sinkQuota; // existed quota size for sink task
double sinkDataSize; // sink to dst data size
double sinkQuota; // existed quota size for sink task
double sinkDataSize; // sink to dst data size
} STaskStatusEntry;
typedef struct SStreamHbMsg {
@ -832,11 +844,14 @@ int32_t streamMetaReopen(SStreamMeta* pMeta);
void streamMetaInitBackend(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
int32_t streamMetaReloadAllTasks(SStreamMeta* pMeta);
int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta);
void streamMetaNotifyClose(SStreamMeta* pMeta);
int32_t streamTaskSetDb(SStreamMeta* pMeta, void* pTask, char* key);
void streamMetaStartHb(SStreamMeta* pMeta);
bool streamMetaTaskInTimer(SStreamMeta* pMeta);
int32_t streamMetaUpdateTaskDownstreamStatus(SStreamTask* pTask, int64_t startTs, int64_t endTs, bool succ);
int32_t streamMetaUpdateTaskDownstreamStatus(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs,
int64_t endTs, bool ready);
void streamMetaRLock(SStreamMeta* pMeta);
void streamMetaRUnLock(SStreamMeta* pMeta);
void streamMetaWLock(SStreamMeta* pMeta);
@ -855,8 +870,10 @@ int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHa
int32_t buildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SRpcMsg* pMsg,
int8_t isSucceed);
SStreamTaskSM* streamCreateStateMachine(SStreamTask* pTask);
void* streamDestroyStateMachine(SStreamTaskSM* pSM);
#ifdef __cplusplus
}
#endif
#endif /* ifndef _STREAM_H_ */
#endif /* ifndef _STREAM_H_ */


@ -55,6 +55,7 @@ int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *p
int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo);
void windowSBfDelete(SUpdateInfo *pInfo, uint64_t count);
void windowSBfAdd(SUpdateInfo *pInfo, uint64_t count);
bool isIncrementalTimeStamp(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);
#ifdef __cplusplus
}


@ -289,6 +289,9 @@ const char* syncStr(ESyncState state);
int32_t syncNodeGetConfig(int64_t rid, SSyncCfg *cfg);
// util
int32_t syncSnapInfoDataRealloc(SSnapshot* pSnap, int32_t size);
#ifdef __cplusplus
}
#endif


@ -558,7 +558,6 @@ int32_t* taosGetErrno();
#define TSDB_CODE_GRANT_GEN_IVLD_KEY TAOS_DEF_ERROR_CODE(0, 0x0812)
#define TSDB_CODE_GRANT_GEN_APP_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0813)
#define TSDB_CODE_GRANT_GEN_ENC_IVLD_KLEN TAOS_DEF_ERROR_CODE(0, 0x0814)
#define TSDB_CODE_GRANT_PAR_IVLD_DIST TAOS_DEF_ERROR_CODE(0, 0x0815)
// sync
// #define TSDB_CODE_SYN_INVALID_CONFIG TAOS_DEF_ERROR_CODE(0, 0x0900) // 2.x


@ -139,6 +139,8 @@ int32_t getWordLength(char type);
int32_t tsDecompressIntImpl_Hw(const char *const input, const int32_t nelements, char *const output, const char type);
int32_t tsDecompressFloatImplAvx512(const char *const input, const int32_t nelements, char *const output);
int32_t tsDecompressFloatImplAvx2(const char *const input, const int32_t nelements, char *const output);
int32_t tsDecompressTimestampAvx512(const char* const input, const int32_t nelements, char *const output, bool bigEndian);
int32_t tsDecompressTimestampAvx2(const char* const input, const int32_t nelements, char *const output, bool bigEndian);
/*************************************************************************
* STREAM COMPRESSION


@ -288,6 +288,7 @@ typedef enum ELogicConditionType {
#define TSDB_CONN_ACTIVE_KEY_LEN 255
#define TSDB_DEFAULT_PKT_SIZE 65480 // same as RPC_MAX_UDP_SIZE
#define TSDB_SNAP_DATA_PAYLOAD_SIZE (1 * 1024 * 1024)
#define TSDB_PAYLOAD_SIZE TSDB_DEFAULT_PKT_SIZE
#define TSDB_DEFAULT_PAYLOAD_SIZE 5120 // default payload size, greater than the PATH_MAX value
@ -305,7 +306,7 @@ typedef enum ELogicConditionType {
#define TSDB_SYNC_APPLYQ_SIZE_LIMIT 512
#define TSDB_SYNC_NEGOTIATION_WIN 512
#define TSDB_SYNC_SNAP_BUFFER_SIZE 2048
#define TSDB_SYNC_SNAP_BUFFER_SIZE 1024
#define TSDB_TBNAME_COLUMN_INDEX (-1)
#define TSDB_MULTI_TABLEMETA_MAX_NUM 100000 // maximum batch size allowed to load table meta

View File

@ -38,3 +38,4 @@ source /etc/profile
${csudo}mkdir -p ${corePath} ||:
${csudo}sysctl -w kernel.core_pattern=${corePath}/core-%e-%p ||:
${csudo}echo "${corePath}/core-%e-%p" | ${csudo}tee /proc/sys/kernel/core_pattern ||:
${csudo}echo "kernel.core_pattern = ${corePath}/core_%e-%p" >> /etc/sysctl.conf ||:

View File

@ -65,14 +65,32 @@ extern "C" {
#define IS_INVALID_COL_LEN(len) ((len) <= 0 || (len) >= TSDB_COL_NAME_LEN)
#define IS_INVALID_TABLE_LEN(len) ((len) <= 0 || (len) >= TSDB_TABLE_NAME_LEN)
//#define TS "_ts"
//#define TS_LEN 3
#define VALUE "_value"
#define VALUE_LEN 6
#define VALUE_LEN (sizeof(VALUE)-1)
#define OTD_JSON_FIELDS_NUM 4
#define MAX_RETRY_TIMES 10
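Deriving VALUE_LEN from sizeof keeps the length from drifting if the literal ever changes: sizeof applied to a string literal counts the trailing NUL, so subtracting one yields the character count. A quick compile-time check (C11):

// sizeof("_value") == 7 including the NUL, so the macro still yields 6.
_Static_assert(sizeof(VALUE) - 1 == 6, "VALUE_LEN must match \"_value\"");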
typedef TSDB_SML_PROTOCOL_TYPE SMLProtocolType;
#define IS_SAME_CHILD_TABLE (elements->measureTagsLen == info->preLine.measureTagsLen \
&& memcmp(elements->measure, info->preLine.measure, elements->measureTagsLen) == 0)
#define IS_SAME_SUPER_TABLE (elements->measureLen == info->preLine.measureLen \
&& memcmp(elements->measure, info->preLine.measure, elements->measureLen) == 0)
#define IS_SAME_KEY (maxKV->type == kv->type && maxKV->keyLen == kv->keyLen && memcmp(maxKV->key, kv->key, kv->keyLen) == 0)
#define IS_SLASH_LETTER_IN_MEASUREMENT(sql) \
(*((sql)-1) == SLASH && (*(sql) == COMMA || *(sql) == SPACE))
#define MOVE_FORWARD_ONE(sql, len) (memmove((void *)((sql)-1), (sql), len))
#define PROCESS_SLASH_IN_MEASUREMENT(key, keyLen) \
for (int i = 1; i < keyLen; ++i) { \
if (IS_SLASH_LETTER_IN_MEASUREMENT(key + i)) { \
MOVE_FORWARD_ONE(key + i, keyLen - i); \
keyLen--; \
} \
}
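The macro unescapes backslash-escaped commas and spaces inside a measurement in place, shrinking the length as it goes. A hedged walk-through, assuming <string.h> and that SLASH, COMMA, and SPACE are '\\', ',', and ' ' as elsewhere in the SML code:

// hedged example: "cpu\,load" (9 bytes) becomes "cpu,load" (8 bytes).
char m[] = "cpu\\,load";
int mLen = (int)strlen(m);          // 9
PROCESS_SLASH_IN_MEASUREMENT(m, mLen);
// m now begins with "cpu,load" and mLen == 8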
typedef enum {
SCHEMA_ACTION_NULL,
@ -83,18 +101,6 @@ typedef enum {
SCHEMA_ACTION_CHANGE_TAG_SIZE,
} ESchemaAction;
typedef struct {
const void *key;
int32_t keyLen;
void *value;
bool used;
}Node;
typedef struct NodeList{
Node data;
struct NodeList* next;
}NodeList;
typedef struct {
char *measure;
char *tags;
@ -117,7 +123,6 @@ typedef struct {
int32_t sTableNameLen;
char childTableName[TSDB_TABLE_NAME_LEN];
uint64_t uid;
// void *key; // for openTsdb
SArray *tags;
@ -161,7 +166,8 @@ typedef struct {
typedef struct {
int64_t id;
SMLProtocolType protocol;
TSDB_SML_PROTOCOL_TYPE protocol;
int8_t precision;
bool reRun;
bool dataFormat; // true means that the name and order of keys in each line are the same (only for the InfluxDB line protocol)
@ -201,29 +207,8 @@ typedef struct {
bool needModifySchema;
} SSmlHandle;
#define IS_SAME_CHILD_TABLE (elements->measureTagsLen == info->preLine.measureTagsLen \
&& memcmp(elements->measure, info->preLine.measure, elements->measureTagsLen) == 0)
#define IS_SAME_SUPER_TABLE (elements->measureLen == info->preLine.measureLen \
&& memcmp(elements->measure, info->preLine.measure, elements->measureLen) == 0)
#define IS_SAME_KEY (maxKV->keyLen == kv.keyLen && memcmp(maxKV->key, kv.key, kv.keyLen) == 0)
#define IS_SLASH_LETTER_IN_MEASUREMENT(sql) \
(*((sql)-1) == SLASH && (*(sql) == COMMA || *(sql) == SPACE))
#define MOVE_FORWARD_ONE(sql, len) (memmove((void *)((sql)-1), (sql), len))
#define PROCESS_SLASH_IN_MEASUREMENT(key, keyLen) \
for (int i = 1; i < keyLen; ++i) { \
if (IS_SLASH_LETTER_IN_MEASUREMENT(key + i)) { \
MOVE_FORWARD_ONE(key + i, keyLen - i); \
keyLen--; \
} \
}
extern int64_t smlFactorNS[3];
extern int64_t smlFactorS[3];
extern int64_t smlFactorNS[];
extern int64_t smlFactorS[];
typedef int32_t (*_equal_fn_sml)(const void *, const void *);
@ -231,16 +216,10 @@ SSmlHandle *smlBuildSmlInfo(TAOS *taos);
void smlDestroyInfo(SSmlHandle *info);
int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *offset);
int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset);
//SArray *smlJsonParseTags(char *start, char *end);
bool smlParseNumberOld(SSmlKv *kvVal, SSmlMsgBuf *msg);
//void* nodeListGet(NodeList* list, const void *key, int32_t len, _equal_fn_sml fn);
//int nodeListSet(NodeList** list, const void *key, int32_t len, void* value, _equal_fn_sml fn);
//int nodeListSize(NodeList* list);
bool smlDoubleToInt64OverFlow(double num);
int32_t smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const char *msg2);
bool smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg);
int64_t smlGetTimeValue(const char *value, int32_t len, uint8_t fromPrecision, uint8_t toPrecision);
int8_t smlGetTsTypeByLen(int32_t len);
SSmlTableInfo* smlBuildTableInfo(int numRows, const char* measure, int32_t measureLen);
SSmlSTableMeta* smlBuildSTableMeta(bool isDataFormat);
int32_t smlSetCTableName(SSmlTableInfo *oneTable);
@ -253,12 +232,45 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg);
uint8_t smlGetTimestampLen(int64_t num);
void smlDestroyTableInfo(void *para);
void freeSSmlKv(void* data);
void freeSSmlKv(void* data);
int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseTelnetString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
int32_t smlParseJSON(SSmlHandle *info, char *payload);
void smlStrReplace(char* src, int32_t len);
SSmlSTableMeta* smlBuildSuperTableInfo(SSmlHandle *info, SSmlLineInfo *currElement);
bool isSmlTagAligned(SSmlHandle *info, int cnt, SSmlKv *kv);
bool isSmlColAligned(SSmlHandle *info, int cnt, SSmlKv *kv);
int32_t smlProcessChildTable(SSmlHandle *info, SSmlLineInfo *elements);
int32_t smlProcessSuperTable(SSmlHandle *info, SSmlLineInfo *elements);
int32_t smlJoinMeasureTag(SSmlLineInfo *elements);
void smlBuildTsKv(SSmlKv *kv, int64_t ts);
int32_t smlParseEndTelnetJson(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs, SSmlKv *kv);
int32_t smlParseEndLine(SSmlHandle *info, SSmlLineInfo *elements, SSmlKv *kvTs);
static inline bool smlDoubleToInt64OverFlow(double num) {
if (num >= (double)INT64_MAX || num <= (double)INT64_MIN) return true;
return false;
}
static inline void smlStrReplace(char* src, int32_t len){
if (!tsSmlDot2Underline) return;
for(int i = 0; i < len; i++){
if(src[i] == '.'){
src[i] = '_';
}
}
}
static inline int8_t smlGetTsTypeByLen(int32_t len) {
if (len == TSDB_TIME_PRECISION_SEC_DIGITS) {
return TSDB_TIME_PRECISION_SECONDS;
} else if (len == TSDB_TIME_PRECISION_MILLI_DIGITS) {
return TSDB_TIME_PRECISION_MILLI;
} else {
return -1;
}
}
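Moving these helpers into the header as static inline lets every translation unit inline them without link-time duplicates. A hedged illustration of what they compute, assuming <string.h> and that the digit constants are 10 and 13 for seconds and milliseconds:

// smlGetTsTypeByLen(10) -> TSDB_TIME_PRECISION_SECONDS
// smlGetTsTypeByLen(13) -> TSDB_TIME_PRECISION_MILLI
// smlDoubleToInt64OverFlow(9.3e18) -> true (above INT64_MAX)
char name[] = "sys.cpu.usage";
smlStrReplace(name, (int32_t)strlen(name));
// with tsSmlDot2Underline enabled, name is now "sys_cpu_usage"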
#ifdef __cplusplus
}
#endif

View File

@ -732,7 +732,7 @@ int taos_options_imp(TSDB_OPTION option, const char *str) {
return -1;
}
newstr[0] = '"';
strncpy(newstr+1, str, len);
memcpy(newstr+1, str, len);
newstr[len + 1] = '"';
newstr[len + 2] = '\0';
str = newstr;
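memcpy is the better fit here: the length is already known, and strncpy's stop-at-NUL and zero-padding semantics add nothing since the code NUL-terminates explicitly. A hedged standalone sketch of the same quoting step (the helper name and ownership convention are assumptions; assumes <stdlib.h> and <string.h>):

// hedged sketch: wrap str in double quotes; caller frees the result.
static char* quoteString(const char* str, size_t len) {
  char* newstr = malloc(len + 3);
  if (newstr == NULL) return NULL;
  newstr[0] = '"';
  memcpy(newstr + 1, str, len);  // length known up front, no NUL scanning
  newstr[len + 1] = '"';
  newstr[len + 2] = '\0';
  return newstr;
}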

Some files were not shown because too many files have changed in this diff.