Merge branch 'main' into fix/test_1x

jiajingbin 2023-09-01 20:46:29 +08:00 committed by GitHub
commit 9c66c81e1d
444 changed files with 35879 additions and 13302 deletions


@ -0,0 +1,19 @@
# apr-util
ExternalProject_Add(aprutil-1
URL https://dlcdn.apache.org//apr/apr-util-1.6.3.tar.gz
URL_HASH SHA256=2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/apache/apr-util.git
#GIT_TAG 1.5.4
SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1
#CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)


@ -0,0 +1,19 @@
# apr
ExternalProject_Add(apr-1
URL https://dlcdn.apache.org//apr/apr-1.7.4.tar.gz
URL_HASH SHA256=a4137dd82a185076fa50ba54232d920a17c6469c30b0876569e1c2a05ff311d9
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/apache/apr.git
#GIT_TAG 1.5.2
SOURCE_DIR "${TD_CONTRIB_DIR}/apr"
BUILD_IN_SOURCE TRUE
UPDATE_DISCONNECTED TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --enable-shared=no
#CONFIGURE_COMMAND ./configure
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)


@ -77,6 +77,12 @@ ELSE ()
SET(TD_TAOS_TOOLS TRUE)
ENDIF ()
IF (${TD_WINDOWS})
SET(TAOS_LIB taos_static)
ELSE ()
SET(TAOS_LIB taos)
ENDIF ()
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")


@ -125,6 +125,16 @@ option(
ON
)
IF(${TD_LINUX})
option(
BUILD_WITH_COS
"If build with cos"
ON
)
ENDIF ()
option(
BUILD_WITH_SQLITE
"If build with sqlite"


@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.1.0.4.alpha")
SET(TD_VER_NUMBER "3.1.1.0.alpha")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)


@ -0,0 +1,12 @@
# cos
ExternalProject_Add(cos
GIT_REPOSITORY https://github.com/tencentyun/cos-c-sdk-v5.git
GIT_TAG v5.0.16
SOURCE_DIR "${TD_CONTRIB_DIR}/cos-c-sdk-v5"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)


@ -0,0 +1,17 @@
# curl
ExternalProject_Add(curl
URL https://curl.se/download/curl-8.2.1.tar.gz
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/curl/curl.git
#GIT_TAG curl-7_88_1
SOURCE_DIR "${TD_CONTRIB_DIR}/curl"
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd
#CONFIGURE_COMMAND ./configure --without-ssl
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)


@ -0,0 +1,14 @@
# mxml
ExternalProject_Add(mxml
GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git
GIT_TAG v2.12
SOURCE_DIR "${TD_CONTRIB_DIR}/mxml"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --enable-shared=no
#CONFIGURE_COMMAND ./configure
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)


@ -6,6 +6,39 @@ function(cat IN_FILE OUT_FILE)
file(APPEND ${OUT_FILE} "${CONTENTS}")
endfunction(cat IN_FILE OUT_FILE)
if(${TD_LINUX})
set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
if(${BUILD_WITH_COS})
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
endif(${BUILD_WITH_COS})
configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
if(${BUILD_WITH_COS})
cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
endif(${BUILD_WITH_COS})
configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
endif(${TD_LINUX})
set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@ -122,6 +155,16 @@ if(${BUILD_WITH_SQLITE})
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_SQLITE})
# cos
if(${BUILD_WITH_COS})
#cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_COS)
endif(${BUILD_WITH_COS})
# lucene
if(${BUILD_WITH_LUCENE})
cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@ -347,6 +390,31 @@ if (${BUILD_WITH_ROCKSDB})
endif()
endif()
# cos
if(${BUILD_WITH_COS})
if(${TD_LINUX})
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)
#ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
option(ENABLE_TEST "Enable the tests" OFF)
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
MESSAGE("$ENV{HOME}/.cos-local.1/include")
set(CMAKE_BUILD_TYPE debug)
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
set(CMAKE_PROJECT_NAME cos_c_sdk)
add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL)
target_include_directories(
cos_c_sdk
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cos-c-sdk-v5/cos_c_sdk>
)
set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
else()
endif(${TD_LINUX})
endif(${BUILD_WITH_COS})
# lucene
# To support build on ubuntu: sudo apt-get install libboost-all-dev
if(${BUILD_WITH_LUCENE})


@ -3,6 +3,11 @@ if(${BUILD_WITH_ROCKSDB})
add_subdirectory(rocksdb)
endif(${BUILD_WITH_ROCKSDB})
# cos
if(${BUILD_WITH_COS})
add_subdirectory(cos)
endif(${BUILD_WITH_COS})
if(${BUILD_WITH_LUCENE})
add_subdirectory(lucene)
endif(${BUILD_WITH_LUCENE})


@ -0,0 +1,49 @@
add_executable(cosTest "")
target_sources(cosTest
PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/main.c"
)
#find_path(APR_INCLUDE_DIR apr-1/apr_time.h)
#find_path(APR_UTIL_INCLUDE_DIR apr/include/apr-1/apr_md5.h)
#find_path(MINIXML_INCLUDE_DIR mxml.h)
#find_path(CURL_INCLUDE_DIR curl/curl.h)
#include_directories (${MINIXML_INCLUDE_DIR})
#include_directories (${CURL_INCLUDE_DIR})
FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
#FIND_PROGRAM(APU_CONFIG_BIN NAMES apu-config apu-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
IF (APR_CONFIG_BIN)
EXECUTE_PROCESS(
COMMAND ${APR_CONFIG_BIN} --includedir
OUTPUT_VARIABLE APR_INCLUDE_DIR
OUTPUT_STRIP_TRAILING_WHITESPACE
)
ENDIF()
#IF (APU_CONFIG_BIN)
# EXECUTE_PROCESS(
# COMMAND ${APU_CONFIG_BIN} --includedir
# OUTPUT_VARIABLE APR_UTIL_INCLUDE_DIR
# OUTPUT_STRIP_TRAILING_WHITESPACE
# )
#ENDIF()
include_directories (${APR_INCLUDE_DIR})
#include_directories (${APR_UTIL_INCLUDE_DIR})
target_include_directories(
cosTest
PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
)
#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
#find_library(MINIXML_LIBRARY mxml)
#find_library(CURL_LIBRARY curl)
target_link_libraries(cosTest cos_c_sdk)
target_link_libraries(cosTest apr-1)
target_link_libraries(cosTest aprutil-1)
target_link_libraries(cosTest mxml)
target_link_libraries(cosTest curl)

contrib/test/cos/main.c (new file, 3095 lines; diff too large to display)


@ -24,24 +24,19 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
Tips (using the C interface as an example):
- A consumption group consumes all data under the same topic, and different consumption groups are independent of each other;
- A consumption group consumes all vgroups of the same topic, which can be composed of multiple consumers, but a vgroup is only consumed by one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
- On the server side, only one offset is saved for each vgroup, and the offsets for each vgroup are monotonically increasing, but not necessarily continuous. There is no correlation between the offsets of various vgroups;
- Each poll server will return a result block, which belongs to a vgroup and may contain data from multiple versions of wal. This block can be accessed through tmq_get_vgroup_offset. The offset interface obtains the offset of the first record in the block;
- If a consumer group has never committed an offset, when its member consumers restart and pull data again, they start consuming from the set value of the parameter auto.offset.reset; In a consumer lifecycle, the client locally records the offset of the most recent pull data and will not pull duplicate data;
- If a consumer terminates abnormally (without calling tmq_close), they need to wait for about 12 seconds to trigger their consumer group rebalance. The consumer's status on the server will change to LOST, and after about 1 day, the consumer will be automatically deleted; Exit normally, and after exiting, the consumer will be deleted; Add a new consumer, wait for about 2 seconds to trigger Rebalance, and the consumer's status on the server will change to ready;
- The consumer group Rebalance will reassign Vgroups to all consumer members in the ready state of the group, and consumers can only assign/see/commit/poll operations to the Vgroups they are responsible for;
- Consumers can tmq_position to obtain the offset of the current consumption, seek to the specified offset, and consume again;
- Seek points the position to the specified offset without executing the commit operation. Once the seek is successful, it can poll the specified offset and subsequent data;
- Position is to obtain the current consumption position, which is the position to be taken next time, not the current consumption position
- Commit is the submission of the consumption location. Without parameters, it is the submission of the current consumption location (the location to be taken next time, not the current consumption location). With parameters, it is the location in the submission parameters (i.e. the location to be taken after the next exit and restart)
- Seek is to set the consumer's consumption position. Wherever the seek goes, the position will be returned, all of which are the positions to be taken next time
- Seek does not affect commit, commit does not affect seek, independent of each other, the two are different concepts
- The begin interface is the offset of the first data in wal, and the end interface is the offset+1 of the last data in wal10.
- Before the seek operation, tmq must be call tmq_get_topic_assignment, The assignment interface obtains the vgroup ID and offset range of the consumer. The seek operation will detect whether the vgroup ID and offset are legal, and if they are illegal, an error will be reported;
- Due to the existence of a WAL expiration deletion mechanism, even if the seek operation is successful, it is possible that the offset has expired when polling data. If the offset of poll is less than the WAL minimum version number, it will be consumed from the WAL minimum version number;
- The tmq_get_vgroup_offset interface obtains the offset of the first data in the result block where the record is located. When seeking to this offset, it will consume all the data in this block. Refer to point four;
- Data subscription is to consume data from the wal. If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products.
1. A consumer group consumes all data under the same topic, and different consumer groups are independent of each other;
2. A consumer group consumes all vgroups of the same topic. The group can consist of multiple consumers, but each vgroup is consumed by only one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
3. On the server side, only one offset is saved per vgroup. The offsets of a vgroup increase monotonically but are not necessarily continuous, and there is no correlation between the offsets of different vgroups;
4. Each poll returns a result block that belongs to one vgroup and may contain data from multiple WAL versions. The offset of the first record in this block can be obtained through the tmq_get_vgroup_offset interface;
5. If a consumer group has never committed an offset, when its member consumers restart and pull data again, they start consuming from the value of the parameter auto.offset.reset. Within a consumer's lifecycle, the client locally records the offset of the most recent pull and will not pull duplicate data;
6. If a consumer terminates abnormally (without calling tmq_close), it takes about 12 seconds to trigger a rebalance of its consumer group. The consumer's status on the server changes to LOST, and after about 1 day the consumer is deleted automatically. On a normal exit, the consumer is deleted immediately. When a new consumer is added, a rebalance is triggered after about 2 seconds, and the consumer's status on the server changes to ready;
7. A consumer group rebalance reassigns vgroups to all consumers of the group that are in the ready state; consumers can perform assignment/seek/commit/poll operations only on the vgroups they are responsible for;
8. Consumers can call tmq_position to obtain the offset of the current consumption position, then seek to a specified offset and consume again;
9. Seek moves the position to the specified offset without committing. Once the seek succeeds, poll returns data from the specified offset onward;
10. Before seeking, the tmq_get_topic_assignment interface must be called to obtain the consumer's vgroup IDs and offset ranges. The seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
11. Because of the WAL expiration and deletion mechanism, even if a seek succeeds, the offset may have expired by the time data is polled. If the polled offset is smaller than the minimum WAL version number, consumption starts from the minimum WAL version number;
12. The tmq_get_vgroup_offset interface returns the offset of the first record in the result block that contains the record. Seeking to this offset consumes all data in that block. Refer to point 4;
13. Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed, so set reasonable values for the `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` parameters when creating the database (see the sketch after this list) and make sure your application consumes data in a timely way to avoid data loss. This behavior is similar to Kafka and other widely used message queue products.
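For instance, a minimal SQL sketch of the database and topic setup that point 13 relies on; the database, topic, table, and column names here are hypothetical:
```sql
-- Keep WAL data for one day (in seconds) so consumers can lag without losing data
CREATE DATABASE power WAL_RETENTION_PERIOD 86400;
-- A topic defined over a query; consumers subscribe to this topic instead of the table
CREATE TOPIC topic_meters AS SELECT ts, current, voltage FROM power.meters;
```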
## Data Schema and API
@ -104,8 +99,6 @@ The related schemas and APIs in various languages are described as follows:
DLL_EXPORT const char *tmq_err2str(int32_t code);
```
For more information, see [C/C++ Connector](/reference/connector/cpp).
The following example is based on the smart meter table described in Data Models. For complete sample code, see the C language section below.
</TabItem>
@ -120,7 +113,19 @@ Set<String> subscription() throws SQLException;
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
Set<TopicPartition> assignment() throws SQLException;
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
void close() throws SQLException;
```


@ -887,4 +887,4 @@ The `pycumsum` function finds the cumulative sum for all data in the input colum
</details>
## Manage and Use UDF
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../taos-sql/udf/).


@ -62,12 +62,13 @@ serverPort 6030
For all the dnodes in a TDengine cluster, the parameters below must be configured identically; any dnode whose configuration differs from the dnodes already in the cluster cannot join the cluster.
| **#** | **Parameter** | **Definition** |
| ----- | ------------------ | ------------------------------------------- |
| 1 | statusInterval | The interval by which dnode reports its status to mnode |
| 2 | timezone | Timezone |
| 3 | locale | System region and encoding |
| 4 | charset | Character set |
| **#** | **Parameter** | **Definition** |
| ----- | ---------------- | ----------------------------------------------------------------------------- |
| 1 | statusInterval | The interval by which dnode reports its status to mnode |
| 2 | timezone | Timezone |
| 3 | locale | System region and encoding |
| 4 | charset | Character set |
| 5 | ttlChangeOnWrite | Whether the ttl expiration time changes with the table modification operation |
## Start Cluster
@ -97,7 +98,7 @@ Then, on the first dnode i.e. h1.tdengine.com in our example, use TDengine CLI `
CREATE DNODE "h2.taos.com:6030";
````
This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In the command, "fqdn:port" should be quoted using double quotes. Change `"h2.taos.com:6030"` to the end point of your new dnode.
Then on the first dnode h1.tdengine.com, execute `show dnodes` in `taos`


@ -43,6 +43,8 @@ In TDengine, the data types below can be used when specifying a column or tag.
| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
| 16 | VARCHAR | User-defined | Alias of BINARY |
| 17 | GEOMETRY | User-defined | Geometry |
| 18 | VARBINARY | User-defined | Binary data with variable length |
:::note
- Each row of the table cannot be longer than 48KB (64KB since version 3.0.5.0) (note that each BINARY/NCHAR/GEOMETRY column takes up an additional 2 bytes of storage space).
@ -57,7 +59,7 @@ In TDengine, the data types below can be used when specifying a column or tag.
| 3 | POLYGON((1.0 1.0, 2.0 2.0, 1.0 1.0)) | 13+3*16 | 13+4094*16 | +16 |
- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
- VARBINARY is a data type that stores binary data, with a maximum length of 65517 bytes for data columns and 16382 bytes for tag columns. Binary data can be written through SQL or schemaless writing (in which case it must be converted to a string starting with \x), or through stmt (which can use binary directly). Query results are displayed as hexadecimal strings starting with \x (see the sketch after this note).
:::
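A minimal sketch of writing and reading VARBINARY through SQL, assuming a hypothetical table `raw_frames`:
```sql
-- A VARBINARY column holding up to 64 bytes of raw payload
CREATE TABLE raw_frames (ts TIMESTAMP, payload VARBINARY(64));
-- Hexadecimal literals start with \x; query results are displayed the same way
INSERT INTO raw_frames VALUES (NOW, '\x98f46e');
SELECT * FROM raw_frames;
```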
## Constants


@ -9,7 +9,7 @@ description: This document describes how to query data in TDengine.
```sql
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }
SELECT [DISTINCT] select_list
SELECT [hints] [DISTINCT] [TAGS] select_list
from_clause
[WHERE condition]
[partition_by_clause]
@ -21,6 +21,11 @@ SELECT [DISTINCT] select_list
[LIMIT limit_val [OFFSET offset_val]]
[>> export_file]
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
hint:
BATCH_SCAN | NO_BATCH_SCAN
select_list:
select_expr [, select_expr] ...
@ -70,6 +75,29 @@ order_expr:
{expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
```
## Hints
Hints are a means of user control over query optimization for individual statements. A hint is ignored automatically if it is not applicable to the current query statement. The specific rules are as follows:
- Hints syntax starts with `/*+` and ends with `*/`, spaces are allowed before or after.
- Hints syntax can only follow the SELECT keyword.
- Each hints clause can contain multiple hints, separated by spaces. When hints conflict or are identical, whichever comes first takes effect.
- If one of the hints is invalid, the valid hints before it still take effect, and the invalid hint and any subsequent hints are ignored.
- hint_param_list contains the arguments of each hint and varies from hint to hint.
The list of currently supported hints is as follows:
| **Hint**      | **Params**     | **Comment**                | **Scope**                       |
| :-----------: | -------------- | -------------------------- | ------------------------------- |
| BATCH_SCAN    | None           | Batch table scan           | JOIN statement for super tables |
| NO_BATCH_SCAN | None           | Sequential table scan      | JOIN statement for super tables |
For example:
```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
```
## Lists
A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list.
@ -197,6 +225,14 @@ The \_IROWTS pseudocolumn can only be used with INTERP function. This pseudocolu
select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
```
### TAGS Query
When the SELECT list contains only tag columns, the TAGS keyword returns the tag columns of all child tables; one row of tag values is returned for each child table.
```sql
SELECT TAGS tag_name [, tag_name ...] FROM stb_name
```
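A possible usage, assuming the smart-meter super table `meters` with tags `location` and `groupId` used elsewhere in these docs:
```sql
-- One row of tag values is returned for each child table of meters
SELECT TAGS location, groupId FROM meters;
```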
## Query Objects
`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.


@ -49,3 +49,5 @@ You can also add filter conditions to limit the results.
6. You can't create an index on a normal table or a child table.
7. If a tag column has very few unique values, it is better not to create an index on it; the benefit would be very small.
8. A newly created super table automatically gets a randomly named index on its first tag column; the name is composed of the tag column name plus 23 random bytes, and the index can be rebuilt or dropped (see the sketch below).
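A minimal sketch of creating and dropping a tag index, assuming the `meters` super table with a `location` tag; the index name `idx_location` is hypothetical:
```sql
-- Create an index on the location tag of the (assumed) meters super table
CREATE INDEX idx_location ON meters (location);
-- Drop the index when it is no longer useful
DROP INDEX idx_location;
```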


@ -402,7 +402,7 @@ CAST(expr AS type_name)
**Return value type**: The type specified by parameter `type_name`
**Applicable data types**: All data types except JSON
**Applicable data types**: All data types except JSON and VARBINARY. If type_name is VARBINARY, expr can only be VARCHAR.
**Nested query**: It can be used in both the outer query and inner query in a nested query.
@ -698,7 +698,7 @@ ELAPSED(ts_primary_key [, time_unit])
LEASTSQUARES(expr, start_val, step_val)
```
**Description**: The linear regression function of the specified column and the timestamp column (primary key), `start_val` is the initial value and `step_val` is the step value.
**Description**: The linear regression function of a specified column, `start_val` is the initial value and `step_val` is the step value.
**Return value type**: A string in the format of "(slope, intercept)"


@ -168,3 +168,11 @@ All [scalar functions](../function/#scalar-functions) are available in stream pr
- [unique](../function/#unique)
- [mode](../function/#mode)
## Pause/Resume stream
1. Pause stream
PAUSE STREAM [IF EXISTS] stream_name;
If "IF EXISTS" is not specified and the stream does not exist, an error is reported; if "IF EXISTS" is specified and the stream does not exist, success is returned; if the stream exists, all of its tasks are paused.
2. Resume stream
RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
If "IF EXISTS" is not specified and the stream does not exist, an error is reported; if "IF EXISTS" is specified and the stream does not exist, success is returned; if the stream exists, all of its tasks are resumed. If "IGNORE UNTREATED" is specified, data written while the stream was paused is ignored when the stream resumes (a usage sketch follows).


@ -19,6 +19,9 @@ index_option:
functions:
function [, function] ...
```
### Tag Indexing
See [tag index](../tag-index).
### SMA Indexing


@ -36,6 +36,7 @@ REST connection supports all platforms that can run Java.
| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
| 3.2.5 | Subscription add committed() and assignment() method | 3.1.0.3 or later |
| 3.2.4 | Subscription add the enable.auto.commit parameter and the unsubscribe() method in the WebSocket connection | - |
| 3.2.3 | Fixed resultSet data parsing failure in some cases | - |
| 3.2.2 | Subscription add seek function | 3.0.5.0 or later |
@ -1019,14 +1020,19 @@ while(true) {
#### Assignment subscription Offset
```java
// get topicPartition
Set<TopicPartition> assignment() throws SQLException;
// get offset
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
// Overrides the fetch offsets that the consumer will use on the next poll(timeout).
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
```
Example usage is as follows.
@ -1052,6 +1058,18 @@ try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
}
```
#### Commit offset
If `enable.auto.commit` is false, offsets can be committed manually.
```java
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
// async commit only support jni connection
void commitAsync(OffsetCommitCallback<V> callback) throws SQLException;
void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback<V> callback) throws SQLException;
```
#### Close subscriptions
```java
@ -1174,7 +1192,6 @@ public abstract class ConsumerLoop {
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");


@ -13,7 +13,7 @@ After TDengine starts, it automatically writes many metrics in specific interval
To deploy TDinsight, we need
- a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 3.0.1.0 and above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
- taosAdapter has been installed and running, please refer to [taosAdapter](../taosadapter).
- taosKeeper has been installed and running, please refer to [taosKeeper](../taosKeeper).
- taosKeeper has been installed and running; please note that the monitor-related items in the taos.cfg file need to be configured. Refer to [taosKeeper](../taosKeeper) for details.
Please record
- The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
@ -80,7 +80,7 @@ chmod +x TDinsight.sh
./TDinsight.sh
```
This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications.
This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc.
Assume you use TDengine and Grafana's default services on the same host. Run `./TDinsight.sh` and open the Grafana browser window to see the TDinsight dashboard.
@ -112,9 +112,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
-i, --tdinsight-uid <string> Replace with a non-space ASCII code as the dashboard id. [default: tdinsight]
-t, --tdinsight-title <string> Dashboard title. [default: TDinsight]
-e, --tdinsight-editable If the provisioning dashboard could be editable. [default: false]
-E, --external-notifier <string> Apply external notifier uid to TDinsight dashboard.
```
Most command-line options can take effect the same as environment variables.
@ -132,7 +129,10 @@ Most command-line options can take effect the same as environment variables.
| -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight `uid` of the dashboard. [default: tdinsight] |
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight dashboard title. [Default: TDinsight] |
| -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | If the dashboard is configured to be editable. [Default: false] |
| -E | --external-notifier | EXTERNAL_NOTIFIER | Apply the external notifier uid to the TDinsight dashboard. |
:::note
The `-E` option is deprecated. We use Grafana unified alerting function instead.
:::
Suppose you start a TDengine database on host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script.
@ -140,18 +140,6 @@ Suppose you start a TDengine database on host `tdengine` with HTTP API port `604
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
```
We provide a "-E" option to configure TDinsight to use the existing Notification Channel from the command line. Assuming your Grafana user and password is `admin:admin`, use the following command to get the `uid` of an existing notification channel.
```bash
curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifications | jq
```
Use the `uid` value obtained above as `-E` input.
```bash
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
```
If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature.
```bash


@ -32,8 +32,10 @@ All data in tag_set is automatically converted to the NCHAR data type and does n
In the schemaless writing data line protocol, each data item in the field_set needs to be described with its data type. Let's explain in detail:
- If there are English double quotes on both sides, it indicates the VARCHAR(N) type. For example, `"abc"`.
- If there are double quotes on both sides and an L prefix, it means NCHAR(N) type. For example, `L"error message"`.
- If there are English double quotes on both sides, it indicates the VARCHAR type. For example, `"abc"`.
- If there are double quotes on both sides and a L/l prefix, it means NCHAR type. For example, `L"error message"`.
- If there are double quotes on both sides and a G/g prefix, it means GEOMETRY type. For example `G"Point(4.343 89.342)"`.
- If there are double quotes on both sides and a B/b prefix, it means VARBINARY type. Either a hexadecimal value starting with \x or a plain string can be used inside the double quotes. For example `B"\x98f46e"` and `B"hello"`.
- Spaces, equals sign (=), comma (,), double quote ("), and backslash (\\) need to be escaped with a backslash (\\) in front. (All refer to the ASCII character). The rules are as follows:
| **Serial number** | **Element** | **Escape characters** |
@ -110,7 +112,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
Note: TDengine 3.0.3.0 and later automatically detect whether order is consistent. This parameter is no longer used.
9. Because SQL table names do not support the period (.), schemaless writing handles periods as well: if a table name automatically created by schemaless writing contains a period, it is automatically replaced with an underscore (\_). If you manually specify a sub table name that contains a period, it is also converted to an underscore (\_).
10. taos.cfg adds the smlTsDefaultName configuration (a string value), which works only on the client side. It sets the name of the time column for tables automatically created through schemaless writing; if not configured, the name defaults to _ts.
11. Super table names and child table names are case sensitive.
:::tip
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB(64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
:::


@ -218,11 +218,11 @@ The example to query the average system memory usage for the specified interval
### Importing the Dashboard
You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. Ensure that you use TDinsight for 3.x.
You can install the TDinsight dashboard from the data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for a TDengine cluster. Ensure that you use TDinsight for 3.x. Please note that TDinsight for 3.x requires taosKeeper to be configured and running correctly. Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
![TDengine Database Grafana plugine import dashboard](./import_dashboard.webp)
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167).
For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list:

docs/en/20-third-party/70-seeq.md (new file, 440 lines)

@ -0,0 +1,440 @@
---
sidebar_label: Seeq
title: Seeq
description: How to use Seeq and TDengine to perform time series data analysis
---
# How to use Seeq and TDengine to perform time series data analysis
## Introduction
Seeq is an advanced analytics software for the manufacturing industry and the Industrial Internet of Things (IIoT). Seeq supports the use of machine learning innovations within process manufacturing organizations. These capabilities enable organizations to deploy their own or third-party machine learning algorithms into advanced analytics applications used by frontline process engineers and subject matter experts, thus extending the efforts of a single data scientist to many frontline workers.
With the TDengine Java connector, Seeq effortlessly supports querying time series data provided by TDengine and offers functionalities such as data visualization, analysis, and forecasting.
### Install Seeq
Please download Seeq Server and Seeq Data Lab software installation package from the [Seeq official website](https://www.seeq.com/customer-download).
### Install and start Seeq Server
```
tar xvzf seeq-server-xxx.tar.gz
cd seeq-server-installer
sudo ./install
sudo seeq service enable
sudo seeq start
```
### Install and start Seeq Data Lab Server
Seeq Data Lab needs to be installed on a separate server from Seeq Server and connected to Seeq Server through configuration. For detailed installation and configuration instructions, please refer to [the official documentation](https://support.seeq.com/space/KB/1034059842).
```
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
sudo seeq config set Network/DataLab/Hostname localhost
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
# If the main Seeq server is configured to listen over HTTPS
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
# If the main Seeq server is NOT configured to listen over HTTPS
sudo seeq config set Network/Webserver/Port <value>
#On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
```
### Install TDengine on-premise instance
See [Quick Install from Package](../../get-started).
### Or use TDengine Cloud
Register for a [TDengine Cloud](https://cloud.tdengine.com) account and log in to your account.
## Make Seeq able to access TDengine
1. Get data location configuration
```
sudo seeq config get Folders/Data
```
2. Download the TDengine Java connector from maven.org. Please use the latest version (currently 3.2.5: https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar).
3. Restart Seeq server
```
sudo seeq restart
```
4. Input License
Use a browser to access ip:34216 and input the license according to the guide.
## How to use Seeq to analyze time-series data that TDengine serves
This chapter demonstrates how to use Seeq software in conjunction with TDengine for time series data analysis.
### Scenario Overview
The example scenario involves a power system where users collect electricity consumption data from metering devices at a power station on a daily basis. This data is stored in a TDengine cluster. The user now wants to predict how the electricity consumption will develop and purchase additional equipment to support it. The electricity consumption varies with monthly orders, and seasonal variations also affect the power consumption. Since the city is located in the Northern Hemisphere, more electricity is consumed during the summer. We will use simulated data to reflect these assumptions.
### Schema
```
CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS (device NCHAR(20));
CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
```
![Seeq demo schema](./seeq/seeq-demo-schema.webp)
### Mock data
```
python mockdata.py
taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);"
```
The source code is hosted at [GitHub Repository](https://github.com/sangshuduo/td-forecasting).
### Using Seeq for data analysis
#### Data Source configuration
Please log in as a Seeq administrator and create the following data sources.
- Power
```
{
"QueryDefinitions": [
{
"Name": "PowerNum",
"Type": "SIGNAL",
"Sql": "SELECT ts, num FROM meters",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Num",
"Sql": null,
"Uom": "string"
},
{
"Name": "Interpolation Method",
"Value": "linear",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Interpolation",
"Value": "2day",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": null
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
- Goods
```
{
"QueryDefinitions": [
{
"Name": "PowerGoods",
"Type": "CONDITION",
"Sql": "SELECT ts1, ts2, goods FROM power.goods",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Goods",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Duration",
"Value": "10days",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": [
{
"Name": "goods",
"Value": "${columnResult}",
"Column": "goods",
"Uom": "string"
}
]
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
- Temperature
```
{
"QueryDefinitions": [
{
"Name": "PowerNum",
"Type": "SIGNAL",
"Sql": "SELECT ts, temperature FROM meters",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Temperature",
"Sql": null,
"Uom": "string"
},
{
"Name": "Interpolation Method",
"Value": "linear",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Interpolation",
"Value": "2day",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": null
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
#### Launch Seeq Workbench
Please log in to the Seeq server (IP:port) and create a new Seeq Workbench, then select data sources and choose the correct tools to do data visualization and analysis. Please refer to [the official documentation](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for the details.
![Seeq Workbench](./seeq/seeq-demo-workbench.webp)
#### Use Seeq Data Lab Server for advanced data analysis
Please log in to the Seeq service (IP:port) and create a new Seeq Data Lab. Then you can use advanced tools, including the Python environment and machine learning add-ons, for more complex analysis.
```Python
from seeq import spy
spy.options.compatibility = 189
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import mlforecast
import lightgbm as lgb
from mlforecast.target_transforms import Differences
from sklearn.linear_model import LinearRegression
ds = spy.search({'ID': "8C91A9C7-B6C2-4E18-AAAF-XXXXXXXXX"})
print(ds)
sig = ds.loc[ds['Name'].isin(['Num'])]
print(sig)
data = spy.pull(sig, start='2015-01-01', end='2022-12-31', grid=None)
print("data.info()")
data.info()
print(data)
#data.plot()
print("data[Num].info()")
data['Num'].info()
da = data['Num'].index.tolist()
#print(da)
li = data['Num'].tolist()
#print(li)
data2 = pd.DataFrame()
data2['ds'] = da
print('1st data2 ds info()')
data2['ds'].info()
#data2['ds'] = pd.to_datetime(data2['ds']).to_timestamp()
data2['ds'] = pd.to_datetime(data2['ds']).astype('int64')
data2['y'] = li
print('2nd data2 ds info()')
data2['ds'].info()
print(data2)
data2.insert(0, column = "unique_id", value="unique_id")
print("Forecasting ...")
forecast = mlforecast.MLForecast(
models = lgb.LGBMRegressor(),
freq = 1,
lags=[365],
target_transforms=[Differences([365])],
)
forecast.fit(data2)
predicts = forecast.predict(365)
pd.concat([data2, predicts]).set_index("ds").plot(title = "current data with forecast")
plt.show()
```
Example output:
![Seeq forecast result](./seeq/seeq-forecast-result.webp)
### How to configure Seeq data source to access TDengine Cloud
There is no essential difference between configuring a Seeq data source connection to TDengine Cloud and to a local TDengine instance. After logging in to TDengine Cloud, select "Programming - Java" and copy the provided JDBC URL string with the token. Then use this JDBC URL string to fill in the DatabaseJdbcUrl value in the Seeq data source configuration.
Please note that when using TDengine Cloud, you need to specify the database name in your SQL commands.
#### The data source of TDengine Cloud example
```
{
"QueryDefinitions": [
{
"Name": "CloudVoltage",
"Type": "SIGNAL",
"Sql": "SELECT ts, voltage FROM test.meters",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Voltage",
"Sql": null,
"Uom": "string"
},
{
"Name": "Interpolation Method",
"Value": "linear",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Interpolation",
"Value": "2day",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": null
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
#### Seeq Workbench with TDengine Cloud data source example
![Seeq workbench with TDengine Cloud](./seeq/seeq-workbench-with-tdengine-cloud.webp)
## Conclusion
By integrating Seeq and TDengine, you can leverage TDengine's efficient storage and querying performance while also benefiting from Seeq's powerful data visualization and analysis capabilities.
This integration allows users to take advantage of TDengine's high-performance time-series data storage and retrieval, ensuring efficient handling of large volumes of data. At the same time, Seeq provides advanced analytics features such as data visualization, anomaly detection, correlation analysis, and predictive modeling, enabling users to gain valuable insights and make data-driven decisions.
Together, Seeq and TDengine provide a comprehensive solution for time series data analysis in diverse industries such as manufacturing, IIoT, and power systems. The combination of efficient data storage and advanced analytics empowers users to unlock the full potential of their time series data, driving operational improvements, and enabling predictive and prescriptive analytics applications.

Four binary image files added (13 KiB, 56 KiB, 26 KiB, and 47 KiB); previews not shown.


@ -78,6 +78,7 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_VARBINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_GEOMETRY: {
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);


@ -76,6 +76,7 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_VARBINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_GEOMETRY: {
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);


@ -10,7 +10,7 @@ TDengine takes full advantage of the characteristics of time-series data and proposes the idea of "one data collection
If you are a developer, please read the [Developer Guide](./develop) chapter carefully. It covers database connection, data modeling, data ingestion, queries, stream processing, caching, data subscription, user-defined functions, and more, with sample code in various programming languages. In most cases, you can simply copy and paste the sample code and make minor changes for your own application to get it running.
We already live in the era of big data. Scaling up can no longer meet growing business demands; every system must be able to scale out, and clustering has become an indispensable feature of big data and database systems. The TDengine team has not only implemented clustering but has also open-sourced this important core capability. For how to deploy, manage, and maintain a TDengine cluster, please refer to the [Deploy a Cluster](./deployment) chapter.
We already live in the era of big data. Scaling up can no longer meet growing business demands; every system must be able to scale out, and clustering has become an indispensable feature of big data and database systems. The TDengine team has not only implemented clustering but has also open-sourced this important core capability. For how to deploy, manage, and maintain a TDengine cluster, please refer to the [Deploy a Cluster] chapter.
TDengine uses SQL as its query language, which greatly reduces learning and migration costs, while also extending SQL for time-series scenarios to support operations such as interpolation, downsampling, and time-weighted averages. The [SQL Reference](./taos-sql) chapter describes the SQL syntax in detail and lists all supported commands and functions.
@ -18,8 +18,6 @@ TDengine uses SQL as its query language, which greatly reduces learning and migration
If you want to learn more about TDengine's peripheral tools, its REST API, and the connectors for various programming languages, please see the [Reference Guide](./reference) chapter.
If you are interested in TDengine's internal architecture and design, you are welcome to read the [Inside TDengine](./tdinternal) chapter, which covers cluster design, data partitioning, sharding, writing, reading, and the workflows of queries and aggregate queries in detail. If you want to study or even contribute to the TDengine code, please read this chapter thoroughly.
Finally, as open-source software, TDengine welcomes everyone's participation. If you find any errors or unclear descriptions in the documentation, please click "Edit this document" at the bottom of each page to modify it directly.
Together, we make a difference!


@ -6,7 +6,14 @@ toc_max_heading_level: 2
TDengine is an open-source, high-performance, cloud-native [time-series database](https://tdengine.com/tsdb/) optimized for IoT, connected vehicles, industrial internet, finance, IT operations, and other scenarios. The TDengine code, including its cluster feature, is open source under GNU AGPL v3.0. Besides the core time-series database functionality, TDengine also provides [caching](../develop/cache/), [data subscription](../develop/tmq), [stream processing](../develop/stream), and other features to reduce system complexity and the cost of development and operations.
This chapter introduces TDengine's main features, competitive advantages, typical use cases, and comparison tests with other databases, giving you an overall understanding of TDengine.
This chapter introduces TDengine's main products and features, competitive advantages, typical use cases, and comparison tests with other databases, giving you an overall understanding of TDengine.
## Main Products
TDengine has three main products: TDengine Pro (TDengine Enterprise Edition), TDengine Cloud, and TDengine OSS. For their specific definitions, please refer to:
- [TDengine Enterprise Edition](https://www.taosdata.com/tdengine-pro)
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
- [TDengine OSS](https://www.taosdata.com/tdengine-oss)
## Main Features


@ -26,24 +26,19 @@ import CDemo from "./_sub_c.mdx";
This document does not introduce the basics of message queues themselves; please look them up yourself if needed.
Notes (using the C interface as an example):
- A consumer group consumes all data under the same topic; different consumer groups are independent of each other;
- A consumer group consumes all vgroups of the same topic; the group can consist of multiple consumers, but each vgroup is consumed by only one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
- On the server side, only one offset is saved per vgroup; the offsets of a vgroup increase monotonically but are not necessarily continuous, and there is no correlation between the offsets of different vgroups;
- Each poll returns a result block that belongs to one vgroup and may contain data from multiple WAL versions; the offset of the first record in the block can be obtained through the tmq_get_vgroup_offset interface;
- If a consumer group has never committed an offset, when its member consumers restart and pull data again, they start consuming from the value of the auto.offset.reset parameter; within a consumer's lifecycle, the client locally records the offset of the most recent pull and will not pull duplicate data;
- If a consumer terminates abnormally (without calling tmq_close), it takes about 12 seconds to trigger a rebalance of its consumer group; the consumer's status on the server changes to LOST, and after about 1 day the consumer is deleted automatically; on a normal exit, the consumer is deleted immediately; when a new consumer is added, a rebalance is triggered after about 2 seconds, and the consumer's status on the server changes to ready;
- A consumer group rebalance reassigns vgroups to all consumers of the group that are in the ready state; consumers can perform assignment/seek/commit/poll operations only on the vgroups they are responsible for;
- Consumers can call tmq_position to obtain the offset of the current consumption position, then seek to a specified offset and consume again;
- seek moves the position to the specified offset without committing; once the seek succeeds, poll can fetch data from the specified offset onward;
- Before seeking, the tmq_get_topic_assignment interface must be called to obtain the consumer's vgroup IDs and offset ranges; the seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
- position returns the current consumption position, i.e. the position to be fetched next, not the position that has already been consumed;
- commit submits the consumption position; without arguments it commits the current consumption position (the position to be fetched next, not the position already consumed); with arguments it commits the position given in the arguments (i.e. the position to resume from after the next restart);
- seek sets the consumer's consumption position; whatever you seek to is what position returns, and all of these are the positions to be fetched next;
- seek does not affect commit and commit does not affect seek; they are independent of each other and are two different concepts;
- The begin interface returns the offset of the first record in the WAL, and the end interface returns the offset of the last record in the WAL + 1;
- The tmq_get_vgroup_offset interface returns the offset of the first record in the result block that contains the record; seeking to this offset consumes all data in that block; see point 4;
- Because of the WAL expiration and deletion mechanism, even if a seek succeeds, the offset may have expired by the time data is polled; if the polled offset is smaller than the minimum WAL version number, consumption starts from the minimum WAL version number;
- Data subscription consumes data from the WAL; if some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed, so set reasonable values for `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consumes data in a timely way to avoid data loss; this behavior is similar to Kafka and other widely used message queue products;
1. A consumer group consumes all data under the same topic; different consumer groups are independent of each other;
2. A consumer group consumes all vgroups of the same topic; the group can consist of multiple consumers, but each vgroup is consumed by only one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
3. On the server side, only one offset is saved per vgroup; the offsets of a vgroup increase monotonically but are not necessarily continuous, and there is no correlation between the offsets of different vgroups;
4. Each poll returns a result block that belongs to one vgroup and may contain data from multiple WAL versions; the offset of the first record in the block can be obtained through the tmq_get_vgroup_offset interface;
5. If a consumer group has never committed an offset, when its member consumers restart and pull data again, they start consuming from the value of the auto.offset.reset parameter; within a consumer's lifecycle, the client locally records the offset of the most recent pull and will not pull duplicate data;
6. If a consumer terminates abnormally (without calling tmq_close), it takes about 12 seconds to trigger a rebalance of its consumer group; the consumer's status on the server changes to LOST, and after about 1 day the consumer is deleted automatically; on a normal exit, the consumer is deleted immediately; when a new consumer is added, a rebalance is triggered after about 2 seconds, and the consumer's status on the server changes to ready;
7. A consumer group rebalance reassigns vgroups to all consumers of the group that are in the ready state; consumers can perform assignment/seek/commit/poll operations only on the vgroups they are responsible for;
8. Consumers can call tmq_position to obtain the offset of the current consumption position, then seek to a specified offset and consume again;
9. seek moves the position to the specified offset without committing; once the seek succeeds, poll can fetch data from the specified offset onward;
10. Before seeking, the tmq_get_topic_assignment interface must be called to obtain the consumer's vgroup IDs and offset ranges; the seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
11. The tmq_get_vgroup_offset interface returns the offset of the first record in the result block that contains the record; seeking to this offset consumes all data in that block; see point 4;
12. Because of the WAL expiration and deletion mechanism, even if a seek succeeds, the offset may have expired by the time data is polled; if the polled offset is smaller than the minimum WAL version number, consumption starts from the minimum WAL version number;
13. Data subscription consumes data from the WAL; if some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed, so set reasonable values for `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consumes data in a timely way to avoid data loss; this behavior is similar to Kafka and other widely used message queue products;
## Main Data Structures and APIs
@ -60,17 +55,17 @@ import CDemo from "./_sub_c.mdx";
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
typedef enum tmq_conf_res_t {
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
} tmq_conf_res_t;
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
} tmq_conf_res_t;
typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end; // The last version of wal + 1
} tmq_topic_assignment;
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
} tmq_topic_assignment;
DLL_EXPORT tmq_conf_t *tmq_conf_new();
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
@ -89,24 +84,24 @@ import CDemo from "./_sub_c.mdx";
DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); //Commit the msgs offset + 1
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment);
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); // Get current offset of the result
DLL_EXPORT const char *tmq_err2str(int32_t code);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
DLL_EXPORT const char *tmq_err2str(int32_t code);
DLL_EXPORT void        tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
```
这些 API 的文档请见 [C/C++ Connector](../../connector/cpp)下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
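
在查看完整示例之前,这里先给出一个最小化的消费流程示意,按创建配置、创建消费者、订阅、轮询、手动提交、关闭的顺序调用上述 API(消费组名、topic 名 `topic_meters` 及连接参数均为示例值,省略错误处理):

```c
#include <inttypes.h>
#include <stdio.h>
#include "taos.h"

// 最小化的消费流程:创建配置 -> 创建消费者 -> 订阅 -> 轮询 -> 手动提交 -> 关闭
static void basic_consume_demo() {
  tmq_conf_t *conf = tmq_conf_new();
  tmq_conf_set(conf, "td.connect.ip", "localhost");        // 连接参数按实际环境填写
  tmq_conf_set(conf, "td.connect.user", "root");
  tmq_conf_set(conf, "td.connect.pass", "taosdata");
  tmq_conf_set(conf, "group.id", "demo_group");             // 消费组
  tmq_conf_set(conf, "auto.offset.reset", "earliest");      // 无 committed offset 时从头消费
  tmq_conf_set(conf, "enable.auto.commit", "false");        // 关闭自动提交,改为手动 commit

  tmq_t *tmq = tmq_consumer_new(conf, NULL, 0);
  tmq_conf_destroy(conf);

  tmq_list_t *topics = tmq_list_new();
  tmq_list_append(topics, "topic_meters");                   // 假设已通过 CREATE TOPIC 创建
  tmq_subscribe(tmq, topics);
  tmq_list_destroy(topics);

  for (int i = 0; i < 10; ++i) {
    TAOS_RES *msg = tmq_consumer_poll(tmq, 1000);            // 最长阻塞 1000ms
    if (msg == NULL) continue;                                // 本次没有拉到数据
    printf("topic:%s vgId:%d offset:%" PRId64 "\n", tmq_get_topic_name(msg),
           tmq_get_vgroup_id(msg), tmq_get_vgroup_offset(msg));
    tmq_commit_sync(tmq, msg);                                // 提交该消息的 offset + 1
    taos_free_result(msg);
  }

  tmq_unsubscribe(tmq);
  tmq_consumer_close(tmq);
}
```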
</TabItem>
<TabItem value="java" label="Java">
@ -120,7 +115,19 @@ Set<String> subscription() throws SQLException;
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
Set<TopicPartition> assignment() throws SQLException;
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
void close() throws SQLException;
```

View File

@ -296,7 +296,7 @@ ldconfig
3. 如果 Python UDF 程序执行时,通过 PYTHONPATH 引用其它的包,可以设置 taos.cfg 的 UdfdLdLibPath 变量为PYTHONPATH的内容
4. 启动 `taosd` 服务
细节请参考 [快速开始](../../get-started)
细节请参考 [立即开始](../../get-started)
### 接口定义
@ -883,5 +883,5 @@ pycumsum 使用 numpy 计算输入列所有数据的累积和。
</details>
## 管理和使用 UDF
在使用 UDF 之前需要先将其加入到 TDengine 系统中。关于如何管理和使用 UDF请参考[管理和使用 UDF](../12-taos-sql/26-udf.md)
在使用 UDF 之前需要先将其加入到 TDengine 系统中。关于如何管理和使用 UDF请参考[管理和使用 UDF](../../taos-sql/udf)

View File

@ -36,6 +36,7 @@ REST 连接支持所有能运行 Java 的平台。
| taos-jdbcdriver 版本 | 主要变化 | TDengine 版本 |
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
| 3.2.5 | 数据订阅增加 committed()、assignment() 方法 | 3.1.0.3 及更高版本 |
| 3.2.4 | 数据订阅在 WebSocket 连接下增加 enable.auto.commit 参数,以及 unsubscribe() 方法。 | - |
| 3.2.3 | 修复 ResultSet 在一些情况数据解析失败 | - |
| 3.2.2 | 新增功能:数据订阅支持 seek 功能。 | 3.0.5.0 及更高版本 |
@ -1022,14 +1023,19 @@ while(true) {
#### 指定订阅 Offset
```java
// 获取订阅的 topicPartition
Set<TopicPartition> assignment() throws SQLException;
// 获取 offset
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
// 指定下一次 poll 中使用的 offset
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
```
示例代码:
@ -1055,6 +1061,18 @@ try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
}
```
#### 提交 Offset
当`enable.auto.commit`为 false 时,可以手动提交 offset。
```java
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
// 异步提交仅在 native 连接下有效
void commitAsync(OffsetCommitCallback<V> callback) throws SQLException;
void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback<V> callback) throws SQLException;
```
#### 关闭订阅
```java

View File

@ -143,6 +143,7 @@ phpize && ./configure --enable-swoole && make -j && make install
| `TDengine\TSDB_DATA_TYPE_FLOAT` | float |
| `TDengine\TSDB_DATA_TYPE_DOUBLE` | double |
| `TDengine\TSDB_DATA_TYPE_BINARY` | binary |
| `TDengine\TSDB_DATA_TYPE_VARBINARY` | varbinary |
| `TDengine\TSDB_DATA_TYPE_TIMESTAMP` | timestamp |
| `TDengine\TSDB_DATA_TYPE_NCHAR` | nchar |
| `TDengine\TSDB_DATA_TYPE_UTINYINT` | utinyint |

View File

@ -4,8 +4,6 @@ import PkgListV3 from "/components/PkgListV3";
<PkgListV3 type={1} sys="Linux" />
[所有下载](../../releases/tdengine)
2. 解压缩软件包
将软件包放置在当前用户可读写的任意目录下,然后执行下面的命令:`tar -xzvf TDengine-client-VERSION.tar.gz`

View File

@ -4,8 +4,6 @@ import PkgListV3 from "/components/PkgListV3";
<PkgListV3 type={8} sys="macOS" />
[所有下载](../../releases/tdengine)
2. 执行安装程序,按提示选择默认值,完成安装。如果安装被阻止,可以右键或者按 Ctrl 点击安装包,选择 `打开`。
3. 配置 taos.cfg

View File

@ -3,9 +3,7 @@ import PkgListV3 from "/components/PkgListV3";
1. 下载客户端安装包
<PkgListV3 type={4} sys="Windows" />
[所有下载](../../releases/tdengine)
2. 执行安装程序,按提示选择默认值,完成安装
3. 安装路径

View File

@ -62,12 +62,13 @@ serverPort 6030
加入到集群中的数据节点 dnode下表中涉及集群相关的参数必须完全相同否则不能成功加入到集群中。
| **#** | **配置参数名称** | **含义** |
| ----- | ------------------ | ------------------------------------------- |
| 1 | statusInterval | dnode 向 mnode 报告状态时长 |
| 2 | timezone | 时区 |
| 3 | locale | 系统区位信息及编码格式 |
| 4 | charset | 字符集编码 |
| **#** | **配置参数名称** | **含义** |
| ----- | ---------------- | ------------------------------------ |
| 1 | statusInterval | dnode 向 mnode 报告状态时长 |
| 2 | timezone | 时区 |
| 3 | locale | 系统区位信息及编码格式 |
| 4 | charset | 字符集编码 |
| 5 | ttlChangeOnWrite | ttl 到期时间是否伴随表的修改操作改变 |
## 启动集群
@ -196,10 +197,10 @@ dnodeID 是集群自动分配的,不得人工指定。它在生成时是递增
1、建立集群时使用 CREATE DNODE 增加新节点后,新节点始终显示 offline 状态?
```sql
1、首先要检查增加的新节点上的 taosd 服务是否已经正常启动;
2、如果已经启动,再检查到新节点的网络是否通畅,可以使用 ping fqdn 验证下;
3、如果前面两步都没有问题,这一步要检查新节点是否做为独立集群在运行,可以使用 taos -h fqdn 连接上后,show dnodes; 命令查看。
如果显示的列表与你主节点上显示的不一致,说明此节点自己单独成立了一个集群,解决的方法是停止新节点上的服务,然后清空新节点上
taos.cfg 中配置的 dataDir 目录下的所有文件,重新启动新节点服务即可解决。
```

View File

@ -42,11 +42,12 @@ CREATE DATABASE db_name PRECISION 'ns';
| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 NCHAR 字符占用 4 字节的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\'`。NCHAR 使用时须指定字符串大小,类型为 NCHAR(10) 的列表示此列的字符串最多存储 10 个 NCHAR 字符。如果用户字符串长度超出声明长度,将会报错。 |
| 15 | JSON | | JSON 数据类型, 只有 Tag 可以是 JSON 格式 |
| 16 | VARCHAR | 自定义 | BINARY 类型的别名 |
| 17 | GEOMETRY | 自定义 | 几何类型 |
| 17 | GEOMETRY | 自定义 | 几何类型 |
| 18 | VARBINARY | 自定义 | 可变长的二进制数据 |
:::note
- 表的每行长度不能超过 48KB从 3.0.5.0 版本开始为 64KB注意每个 BINARY/NCHAR/GEOMETRY 类型的列还会额外占用 2 个字节的存储位置)。
- 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB)(注意:每个 BINARY/NCHAR/GEOMETRY/VARBINARY 类型的列还会额外占用 2 个字节的存储位置)。
- 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
- BINARY 类型理论上最长可以有 16,374(从 3.0.5.0 版本开始,数据列为 65,517,标签列为 16,382)字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`
- GEOMETRY 类型数据列为最大长度为 65,517 字节,标签列最大长度为 16,382 字节。支持 2D 的 POINT、LINESTRING 和 POLYGON 子类型数据。长度计算方式如下表所示:
@ -58,6 +59,7 @@ CREATE DATABASE db_name PRECISION 'ns';
| 3 | POLYGON((1.0 1.0, 2.0 2.0, 1.0 1.0)) | 13+3*16 | 13+4094*16 | +16 |
- SQL 语句中的数值类型将依据是否存在小数点或使用科学计数法表示来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
- VARBINARY 是一种存储二进制数据的数据类型,最大长度为 65,517 字节,标签列最大长度为 16,382 字节。可以通过 SQL 或 schemaless 方式写入(二进制数据需要转换为以 \x 开头的字符串写入),也可以通过 stmt 方式写入(可以直接使用二进制)。显示时以十六进制 \x 开头。
:::
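
下面是一个通过 SQL 写入并查询 VARBINARY 列的 C 接口示意(表名 `vb_demo`、列名均为示例值,假设连接 `taos` 已建立,省略部分错误处理):

```c
#include <stdio.h>
#include "taos.h"

// VARBINARY 通过 SQL 写入时,需转换为以 \x 开头的十六进制字符串;查询结果同样以 \x 前缀展示
static void varbinary_demo(TAOS *taos) {
  TAOS_RES *res = taos_query(taos, "CREATE TABLE IF NOT EXISTS vb_demo (ts TIMESTAMP, payload VARBINARY(16))");
  taos_free_result(res);

  res = taos_query(taos, "INSERT INTO vb_demo VALUES (NOW, '\\x98f46e')");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);

  res = taos_query(taos, "SELECT payload FROM vb_demo");
  taos_free_result(res);
}
```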

View File

@ -9,7 +9,7 @@ description: 查询数据的详细语法
```sql
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }
SELECT [DISTINCT] select_list
SELECT [hints] [DISTINCT] [TAGS] select_list
from_clause
[WHERE condition]
[partition_by_clause]
@ -21,6 +21,11 @@ SELECT [DISTINCT] select_list
[LIMIT limit_val [OFFSET offset_val]]
[>> export_file]
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
hint:
BATCH_SCAN | NO_BATCH_SCAN
select_list:
select_expr [, select_expr] ...
@ -70,6 +75,29 @@ order_expr:
{expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
```
## Hints
Hints 是用户控制单个语句查询优化的一种手段,当 Hint 不适用于当前的查询语句时会被自动忽略,具体说明如下:
- Hints 语法以`/*+`开始,终于`*/`,前后可有空格。
- Hints 语法只能跟随在 SELECT 关键字后。
- 每个 Hints 可以包含多个 Hint,Hint 间以空格分开,当多个 Hint 冲突或相同时以先出现的为准。
- 当 Hints 中某个 Hint 出现错误时,错误出现之前的有效 Hint 仍然有效,当前及之后的 Hint 被忽略。
- hint_param_list 是每个 Hint 的参数,根据每个 Hint 的不同而不同。
目前支持的 Hints 列表如下:
| **Hint** | **参数** | **说明** | **适用范围** |
| :-----------: | -------------- | -------------------------- | -------------------------- |
| BATCH_SCAN | 无 | 采用批量读表的方式 | 超级表 JOIN 语句 |
| NO_BATCH_SCAN | 无 | 采用顺序读表的方式 | 超级表 JOIN 语句 |
举例:
```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
```
## 列表
查询语句可以指定部分或全部列作为返回结果。数据列和标签列都可以出现在列表中。
@ -132,6 +160,16 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
:::
### 标签查询
当查询的列只有标签列时,`TAGS` 关键字可以指定返回所有子表的标签列。每个子表只返回一行标签列。
返回所有子表的标签列:
```sql
SELECT TAGS tag_name [, tag_name ...] FROM stb_name
```
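
下面给出一个通过 C 接口执行标签查询的简单示意(假设存在超级表 `meters`,标签列 `location`、`groupid` 均为示例值,省略错误处理):

```c
#include <stdio.h>
#include "taos.h"

// 使用 TAGS 关键字查询:每个子表只返回一行标签值
static void query_tags_demo(TAOS *taos) {
  TAOS_RES *res = taos_query(taos, "SELECT TAGS location, groupid FROM meters");
  TAOS_ROW  row;
  int32_t   numOfSubTables = 0;
  while ((row = taos_fetch_row(res)) != NULL) {
    numOfSubTables++;  // 每一行对应一个子表的标签值
  }
  printf("total sub tables: %d\n", numOfSubTables);
  taos_free_result(res);
}
```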
### 结果集列名
`SELECT`子句中,如果不指定返回结果集合的列名,结果集列名称默认使用`SELECT`子句中的表达式名称作为列名称。此外,用户可使用`AS`来重命名返回结果集合中列的名称。例如:

View File

@ -48,4 +48,6 @@ SELECT * FROM information_schema.INS_INDEXES
6. 不支持对普通和子表建立索引。
7. 如果某个 tag 列的唯一值较少时,不建议对其建立索引,这种情况下收效甚微。
8. 新建立的超级表会为第一列 tag 自动生成一个索引,索引名随机生成(生成规则为:tag0 的名字 + 23 个字节)。该索引可以在系统表中查到,也可以按需 drop,行为和其他 tag 列的索引一样。
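
针对第 8 条,可以通过系统表 `information_schema.INS_INDEXES` 查看自动生成的索引,并按需删除(下面为 C 接口示意,索引名 `index_name_xxx` 为假设值,实际以查询结果为准,省略错误处理):

```c
#include "taos.h"

// 查看当前索引(包括超级表第一列 tag 自动生成的索引),并按需删除
static void tag_index_demo(TAOS *taos) {
  TAOS_RES *res = taos_query(taos, "SELECT * FROM information_schema.INS_INDEXES");
  taos_free_result(res);

  // index_name_xxx 为从上面查询结果中取得的索引名,此处仅为示例
  res = taos_query(taos, "DROP INDEX index_name_xxx");
  taos_free_result(res);
}
```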

View File

@ -402,7 +402,7 @@ CAST(expr AS type_name)
**返回结果类型**CAST 中指定的类型type_name)。
**适用数据类型**:输入参数 expression 的类型可以是除JSON外的所有类型。
**适用数据类型**:输入参数 expr 的类型可以是除JSON和VARBINARY外的所有类型。如果 type_name 为 VARBINARY,则 expr 只能是 VARCHAR 类型。
**嵌套子查询支持**:适用于内层查询和外层查询。
@ -700,7 +700,7 @@ ELAPSED(ts_primary_key [, time_unit])
LEASTSQUARES(expr, start_val, step_val)
```
**功能说明**:统计表中某列的值是主键(时间戳)的拟合直线方程。start_val 是自变量初始值step_val 是自变量的步长值。
**功能说明**:统计表中某列的值的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。
**返回数据类型**:字符串表达式(斜率, 截距)。

View File

@ -201,7 +201,6 @@ TDengine 对于修改数据提供两种处理方式,由 IGNORE UPDATE 选项
对于已经存在的超级表,检查列的schema信息:
1. 检查列的schema信息是否匹配,对于不匹配的,则自动进行类型转换,当前只有数据长度大于4096byte时才报错,其余场景都能进行类型转换。
2. 检查列的个数是否相同,如果不同,需要显式地指定超级表与subquery的列的对应关系,否则报错;如果相同,可以指定对应关系,也可以不指定,不指定则按位置顺序对应。
3. 至少自定义一个tag否则报错。详见 自定义TAG
## 自定义TAG
@ -249,3 +248,12 @@ T = 最新事件时间 - DELETE_MARK
- [unique](../function/#unique)
- [mode](../function/#mode)
## 暂停、恢复流计算
1.流计算暂停计算任务
PAUSE STREAM [IF EXISTS] stream_name;
没有指定 IF EXISTS:如果该 stream 不存在,则报错;如果存在,则暂停流计算。指定了 IF EXISTS:如果该 stream 不存在,则返回成功;如果存在,则暂停流计算。
2.流计算恢复计算任务
RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
没有指定 IF EXISTS:如果该 stream 不存在,则报错;如果存在,则恢复流计算。指定了 IF EXISTS:如果 stream 不存在,则返回成功;如果存在,则恢复流计算。如果指定 IGNORE UNTREATED,则恢复流计算时,忽略流计算暂停期间写入的数据。
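
上述两条命令可以直接在 taos shell 中执行,也可以通过客户端接口下发,例如(流名称 `s1` 为示例值,假设连接 `taos` 已建立,省略错误处理):

```c
#include "taos.h"

// 暂停名为 s1 的流计算;指定 IF EXISTS 时,流不存在也返回成功
static void pause_resume_stream_demo(TAOS *taos) {
  TAOS_RES *res = taos_query(taos, "PAUSE STREAM IF EXISTS s1");
  taos_free_result(res);

  // 恢复流计算,并忽略暂停期间写入的数据
  res = taos_query(taos, "RESUME STREAM IF EXISTS IGNORE UNTREATED s1");
  taos_free_result(res);
}
```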

View File

@ -1,138 +0,0 @@
---
sidebar_label: 权限管理
title: 权限管理
description: 企业版中才具有的权限管理功能
---
本节讲述如何在 TDengine 中进行权限管理的相关操作。权限管理是 TDengine 企业版的特有功能,本节只列举了一些基本的权限管理功能作为示例,更丰富的权限管理请联系 TDengine 销售或市场团队。
## 创建用户
```sql
CREATE USER use_name PASS 'password' [SYSINFO {1|0}];
```
创建用户。
use_name 最长为 23 字节。
password 最长为 31 字节,合法字符包括"a-zA-Z0-9!?$%^&*()_+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
SYSINFO 表示用户是否可以查看系统信息。1 表示可以查看0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息(如 DNODE、QNODE等、存储相关的信息等。默认为可以查看系统信息。
例如创建密码为123456且可以查看系统信息的用户test如下
```sql
taos> create user test pass '123456' sysinfo 1;
Query OK, 0 of 0 rows affected (0.001254s)
```
## 查看用户
```sql
SHOW USERS;
```
查看用户信息。
```sql
taos> show users;
name | super | enable | sysinfo | create_time |
================================================================================
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
Query OK, 2 rows in database (0.001657s)
```
也可以通过查询INFORMATION_SCHEMA.INS_USERS系统表来查看用户信息例如
```sql
taos> select * from information_schema.ins_users;
name | super | enable | sysinfo | create_time |
================================================================================
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
Query OK, 2 rows in database (0.001953s)
```
## 删除用户
```sql
DROP USER user_name;
```
## 修改用户信息
```sql
ALTER USER user_name alter_user_clause
alter_user_clause: {
PASS 'literal'
| ENABLE value
| SYSINFO value
}
```
- PASS修改用户密码。
- ENABLE修改用户是否启用。1 表示启用此用户0 表示禁用此用户。
- SYSINFO修改用户是否可查看系统信息。1 表示可以查看系统信息0 表示不可以查看系统信息。
例如,禁用 test 用户:
```sql
taos> alter user test enable 0;
Query OK, 0 of 0 rows affected (0.001160s)
```
## 授权
```sql
GRANT privileges ON priv_level TO user_name
privileges : {
ALL
| priv_type [, priv_type] ...
}
priv_type : {
READ
| WRITE
}
priv_level : {
dbname.*
| *.*
}
```
对用户授权。授权功能只包含在企业版中。
授权级别支持到DATABASE权限有READ和WRITE两种。
TDengine 有超级用户和普通用户两类用户。超级用户缺省创建为root拥有所有权限。使用超级用户创建出来的用户为普通用户。在未授权的情况下普通用户可以创建DATABASE并拥有自己创建的DATABASE的所有权限包括删除数据库、修改数据库、查询时序数据和写入时序数据。超级用户可以给普通用户授予其他DATABASE的读写权限使其可以在此DATABASE上读写数据但不能对其进行删除和修改数据库的操作。
对于非DATABASE的对象如USER、DNODE、UDF、QNODE等普通用户只有读权限一般为SHOW命令不能创建和修改。
## 撤销授权
```sql
REVOKE privileges ON priv_level FROM user_name
privileges : {
ALL
| priv_type [, priv_type] ...
}
priv_type : {
READ
| WRITE
}
priv_level : {
dbname.*
| *.*
}
```
收回对用户的授权。授权功能只包含在企业版中。

View File

@ -20,6 +20,9 @@ index_option:
functions:
function [, function] ...
```
### tag 索引
[tag 索引](../tag-index)
### SMA 索引

View File

@ -180,7 +180,7 @@ AllowWebSockets
node_export 是一个机器指标的导出器。请访问 [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) 了解更多信息。
- 支持 Prometheus remote_read 和 remote_write
remote_read 和 remote_write 是 Prometheus 数据读写分离的集群方案。请访问[https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) 了解更多信息。
- 获取 table 所在的虚拟节点组VGroup的 VGroup ID。关于虚拟节点组VGroup的更多信息请访问[整体架构文档](/tdinternal/arch/#主要逻辑单元) 。
- 获取 table 所在的虚拟节点组VGroup的 VGroup ID。
## 接口
@ -245,7 +245,7 @@ Prometheus 使用的由 \*NIX 内核暴露的硬件和操作系统指标的输
### 获取 table 的 VGroup ID
可以访问 http 接口 `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` 获取 table 的 VGroup ID。关于虚拟节点组VGroup的更多信息请访问[整体架构文档](/tdinternal/arch/#主要逻辑单元) 。
可以访问 http 接口 `http://<fqdn>:6041/rest/vgid?db=<db>&table=<table>` 获取 table 的 VGroup ID。
## 内存使用优化方法

View File

@ -11,11 +11,7 @@ taosBenchmark (曾用名 taosdemo ) 是一个用于测试 TDengine 产品性能
## 安装
taosBenchmark 有两种安装方式:
- 安装 TDengine 官方安装包的同时会自动安装 taosBenchmark, 详情请参考[ TDengine 安装](/operation/pkg-install)。
- 单独编译 taos-tools 并安装, 详情请参考 [taos-tools](https://github.com/taosdata/taos-tools) 仓库。
- 安装 TDengine 官方安装包的同时会自动安装 taosBenchmark
## 运行

View File

@ -15,7 +15,7 @@ TDengine 通过 [taosKeeper](../taosKeeper) 将服务器的 CPU、内存、硬
- 单节点的 TDengine 服务器或多节点的 [TDengine] 集群,以及一个[Grafana]服务器。此仪表盘需要 TDengine 3.0.0.0 及以上,并开启监控服务,具体配置请参考:[TDengine 监控配置](../config/#监控相关)。
- taosAdapter 已经安装并正常运行。具体细节请参考:[taosAdapter 使用手册](../taosadapter)
- taosKeeper 已安装并正常运行。具体细节请参考:[taosKeeper 使用手册](../taosKeeper)
- taosKeeper 已安装并正常运行。注意需要 taos.cfg 文件中打开 monitor 相关配置项,具体细节请参考:[taosKeeper 使用手册](../taosKeeper)
记录以下信息:
@ -120,7 +120,7 @@ chmod +x TDinsight.sh
./TDinsight.sh
```
这个脚本会自动下载最新的[Grafana TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases/latest) 和 [TDinsight 仪表盘](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) ,将命令行选项中的可配置参数转为 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置文件,以进行自动化部署及更新等操作。利用该脚本提供的告警设置选项,你还可以获得内置的阿里云短信告警通知支持。
这个脚本会自动下载最新的[Grafana TDengine 数据源插件](https://github.com/taosdata/grafanaplugin/releases/latest) 和 [TDinsight 仪表盘](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) ,将命令行选项中的可配置参数转为 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置文件,以进行自动化部署及更新等操作。
假设您在同一台主机上使用 TDengine 和 Grafana 的默认服务。运行 `./TDinsight.sh` 并打开 Grafana 浏览器窗口就可以看到 TDinsight 仪表盘了。
@ -152,9 +152,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
-i, --tdinsight-uid <string> Replace with a non-space ASCII code as the dashboard id. [default: tdinsight]
-t, --tdinsight-title <string> Dashboard title. [default: TDinsight]
-e, --tdinsight-editable If the provisioning dashboard could be editable. [default: false]
-E, --external-notifier <string> Apply external notifier uid to TDinsight dashboard.
```
大多数命令行选项都可以通过环境变量获得同样的效果。
@ -172,7 +169,10 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
| -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight 仪表盘`uid`。 [默认值tdinsight] |
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight 仪表盘标题。 [默认TDinsight] |
| -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | 如果配置仪表盘可以编辑。 [默认值false] |
| -E | --external-notifier | EXTERNAL_NOTIFIER | 将外部通知程序 uid 应用于 TDinsight 仪表盘。 |
:::note
新版本插件使用 Grafana unified alerting 功能,`-E` 选项不再支持。
:::
假设您在主机 `tdengine` 上启动 TDengine 数据库HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本:
@ -180,18 +180,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord
```
我们提供了一个“-E”选项用于从命令行配置 TDinsight 使用现有的通知通道(Notification Channel)。假设你的 Grafana 用户和密码是 `admin:admin`,使用以下命令获取已有的通知通道的`uid`
```bash
curl --no-progress-meter -u admin:admin http://localhost:3000/api/alert-notifications | jq
```
使用上面获取的 `uid` 值作为 `-E` 输入。
```bash
./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
```
如果要监控多个 TDengine 集群,则需要设置多个 TDinsight 仪表盘。设置非默认 TDinsight 需要进行一些更改: `-n` `-i` `-t` 选项需要更改为非默认名称,如果使用 内置短信告警功能,`-N` 和 `-L` 也应该改变。
```bash

View File

@ -33,8 +33,10 @@ tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要
在无模式写入数据行协议中field_set 中的每个数据项都需要对自身的数据类型进行描述。具体来说:
- 如果两边有英文双引号,表示 VARCHAR(N) 类型。例如 `"abc"`
- 如果两边有英文双引号而且带有 L 前缀,表示 NCHAR(N) 类型。例如 `L"报错信息"`
- 如果两边有英文双引号,表示 VARCHAR 类型。例如 `"abc"`
- 如果两边有英文双引号而且带有 L(或 l)前缀,表示 NCHAR 类型。例如 `L"报错信息"`
- 如果两边有英文双引号而且带有 G(或 g)前缀,表示 GEOMETRY 类型。例如 `G"Point(4.343 89.342)"`
- 如果两边有英文双引号而且带有 B(或 b)前缀,表示 VARBINARY 类型,双引号内可以为\x开头的16进制或者字符串。例如 `B"\x98f46e"` `B"hello"`(写入示例见下文)
- 对空格、等号(=)、逗号(,)、双引号(")、反斜杠(\),前面需要使用反斜杠(\)进行转义。(都指的是英文半角符号)。具体转义规则如下:
| **序号** | **域** | **需转义字符** |
@ -106,6 +108,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
8. 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常,从3.0.3.0开始,自动检测顺序是否一致,该配置废弃。
9. 由于sql建表表名不支持点号(.),所以schemaless也对点号(.)做了处理,如果schemaless自动建表的表名如果有点号(.),会自动替换为下划线(\_)。如果手动指定子表名的话,子表名里有点号(.),同样转化为下划线(\_)。
10. taos.cfg 增加 smlTsDefaultName 配置(值为字符串),只在client端起作用,配置后,schemaless自动建表的时间列名字可以通过该配置设置。不配置的话,默认为 _ts。
11. 无模式写入的数据,超级表或子表名区分大小写。
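
结合上面 B(或 b)前缀(VARBINARY)的写法,下面给出一个通过 C 接口无模式写入的示意(假设已通过 taos_connect 建立连接并选择了数据库,超级表名 st、列名均为示例值;taos_schemaless_insert 的参数含义请以实际安装的 taos.h 为准,省略错误处理):

```c
#include <stdio.h>
#include "taos.h"

// 行协议中使用 B"" 前缀写入 VARBINARY 列(双引号内为 \x 开头的十六进制或普通字符串)
static void sml_varbinary_demo(TAOS *taos) {
  char *lines[] = {
      "st,t1=3 c1=3i64,c2=B\"\\x98f46e\" 1626006833639000000"
  };
  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_NANO_SECONDS);
  if (taos_errno(res) != 0) {
    printf("schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
}
```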
:::tip
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过

View File

@ -13,12 +13,7 @@ taosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的
## 安装
taosKeeper 有两种安装方式:
taosKeeper 安装方式:
- 安装 TDengine 官方安装包的同时会自动安装 taosKeeper, 详情请参考[ TDengine 安装](/operation/pkg-install)。
- 单独编译 taosKeeper 并安装,详情请参考 [taosKeeper](https://github.com/taosdata/taoskeeper) 仓库。
- 安装 TDengine 官方安装包的同时会自动安装 taosKeeper
## 配置和运行方式

View File

@ -1,207 +0,0 @@
---
title: 安装和卸载
description: 安装、卸载、启动、停止和升级
---
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
本节将介绍一些关于安装和卸载更深层次的内容,以及升级的注意事项。
## 安装
关于安装,请参考 [使用安装包立即开始](../../get-started/package)
## 安装目录说明
TDengine 成功安装后,主安装目录是 /usr/local/taos目录内容如下
```
$ cd /usr/local/taos
$ ll
$ ll
total 28
drwxr-xr-x 7 root root 4096 Feb 22 09:34 ./
drwxr-xr-x 12 root root 4096 Feb 22 09:34 ../
drwxr-xr-x 2 root root 4096 Feb 22 09:34 bin/
drwxr-xr-x 2 root root 4096 Feb 22 09:34 cfg/
lrwxrwxrwx 1 root root 13 Feb 22 09:34 data -> /var/lib/taos/
drwxr-xr-x 2 root root 4096 Feb 22 09:34 driver/
drwxr-xr-x 10 root root 4096 Feb 22 09:34 examples/
drwxr-xr-x 2 root root 4096 Feb 22 09:34 include/
lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
```
- 自动生成配置文件目录、数据库目录、日志目录。
- 配置文件缺省目录:/etc/taos/taos.cfg 软链接到 /usr/local/taos/cfg/taos.cfg
- 数据库缺省目录:/var/lib/taos 软链接到 /usr/local/taos/data
- 日志缺省目录:/var/log/taos 软链接到 /usr/local/taos/log
- /usr/local/taos/bin 目录下的可执行文件,会软链接到 /usr/bin 目录下;
- /usr/local/taos/driver 目录下的动态库文件,会软链接到 /usr/lib 目录下;
- /usr/local/taos/include 目录下的头文件,会软链接到到 /usr/include 目录下;
## 卸载
<Tabs>
<TabItem label="apt-get 卸载" value="aptremove">
TDengine 卸载命令如下:
```
$ sudo apt-get remove tdengine
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following packages will be REMOVED:
tdengine
0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded.
After this operation, 68.3 MB disk space will be freed.
Do you want to continue? [Y/n] y
(Reading database ... 135625 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
TDengine is removed successfully!
```
taosTools 卸载命令如下:
```
$ sudo apt remove taostools
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following packages will be REMOVED:
taostools
0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded.
After this operation, 68.3 MB disk space will be freed.
Do you want to continue? [Y/n]
(Reading database ... 147973 files and directories currently installed.)
Removing taostools (2.1.2) ...
```
</TabItem>
<TabItem label="Deb 卸载" value="debuninst">
TDengine 卸载命令如下:
```
$ sudo dpkg -r tdengine
(Reading database ... 120119 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
TDengine is removed successfully!
```
taosTools 卸载命令如下:
```
$ sudo dpkg -r taostools
(Reading database ... 147973 files and directories currently installed.)
Removing taostools (2.1.2) ...
```
</TabItem>
<TabItem label="RPM 卸载" value="rpmuninst">
卸载 TDengine 命令如下:
```
$ sudo rpm -e tdengine
TDengine is removed successfully!
```
卸载 taosTools 命令如下:
```
sudo rpm -e taostools
taosToole is removed successfully!
```
</TabItem>
<TabItem label="tar.gz 卸载" value="taruninst">
卸载 TDengine 命令如下:
```
$ rmtaos
TDengine is removed successfully!
```
卸载 taosTools 命令如下:
```
$ rmtaostools
Start to uninstall taos tools ...
taos tools is uninstalled successfully!
```
</TabItem>
<TabItem label="Windows 卸载" value="windows">
在 C:\TDengine 目录下,通过运行 unins000.exe 卸载程序来卸载 TDengine。
</TabItem>
<TabItem label="Mac 卸载" value="mac">
卸载 TDengine 命令如下:
```
$ rmtaos
TDengine is removed successfully!
```
</TabItem>
</Tabs>
:::info
- TDengine 提供了多种安装包,但最好不要在一个系统上同时使用 tar.gz 安装包和 deb 或 rpm 安装包。否则会相互影响,导致在使用时出现问题。
- 对于 deb 包安装后,如果安装目录被手工误删了部分,出现卸载、或重新安装不能成功。此时,需要清除 TDengine 包的安装信息,执行如下命令:
```
$ sudo rm -f /var/lib/dpkg/info/tdengine*
```
然后再重新进行安装就可以了。
- 对于 rpm 包安装后,如果安装目录被手工误删了部分,出现卸载、或重新安装不能成功。此时,需要清除 TDengine 包的安装信息,执行如下命令:
```
$ sudo rpm -e --noscripts tdengine
```
然后再重新进行安装就可以了。
:::
## 卸载和更新文件说明
卸载安装包的时候,将保留配置文件、数据库文件和日志文件,即 /etc/taos/taos.cfg 、 /var/lib/taos 、 /var/log/taos 。如果用户确认后不需保留,可以手工删除,但一定要慎重,因为删除后,数据将永久丢失,不可以恢复!
如果是更新安装,当缺省配置文件( /etc/taos/taos.cfg )存在时,仍然使用已有的配置文件,安装包中携带的配置文件修改为 taos.cfg.orig 保存在 /usr/local/taos/cfg/ 目录,可以作为设置配置参数的参考样例;如果不存在配置文件,就使用安装包中自带的配置文件。
## 升级
升级分为两个层面:升级安装包 和 升级运行中的实例。
升级安装包请遵循前述安装和卸载的步骤先卸载旧版本再安装新版本。
升级运行中的实例则要复杂得多首先请注意版本号TDengine 的版本号目前分为四段,如 2.4.0.14 和 2.4.0.16,只有前三段版本号一致(即只有第四段版本号不同)才能把一个运行中的实例进行升级。升级步骤如下:
- 停止数据写入
- 确保所有数据落盘,即写入时序数据库
- 停止 TDengine 集群
- 卸载旧版本并安装新版本
- 重新启动 TDengine 集群
- 进行简单的查询操作确认旧数据没有丢失
- 进行简单的写入操作确认 TDengine 集群可用
- 重新恢复业务数据的写入
:::warning
TDengine 不保证低版本能够兼容高版本的数据,所以任何时候都不推荐降级
:::

View File

@ -218,11 +218,11 @@ docker run -d \
### 导入 Dashboard
在数据源配置页面,您可以为该数据源导入 TDinsight 面板,作为 TDengine 集群的监控可视化工具。如果 TDengine 服务端为 3.0 版本请选择 `TDinsight for 3.x` 导入。
在数据源配置页面,您可以为该数据源导入 TDinsight 面板,作为 TDengine 集群的监控可视化工具。如果 TDengine 服务端为 3.0 版本请选择 `TDinsight for 3.x` 导入。注意 TDinsight for 3.x 需要运行和配置 taoskeeper相关使用说明请见 [TDinsight 用户手册](/reference/tdinsight/)。
![TDengine Database Grafana plugine import dashboard](./import_dashboard.webp)
其中适配 TDengine 2.* 的 Dashboard 已发布在 Grafana[Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。其他安装方式和相关使用说明请见 [TDinsight 用户手册](/reference/tdinsight/)。
其中适配 TDengine 2.* 的 Dashboard 已发布在 Grafana[Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。
使用 TDengine 作为数据源的其他面板,可以[在此搜索](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource)。以下是一份不完全列表:

View File

@ -23,7 +23,7 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送
1. Linux 操作系统
2. 已安装 Java 8 和 Maven
3. 已安装 Git、curl、vi
4. 已安装并启动 TDengine。如果还没有可参考[安装和卸载](/operation/pkg-install)
4. 已安装并启动 TDengine。
## 安装 Kafka

docs/zh/20-third-party/70-seeq.md vendored Normal file
View File

@ -0,0 +1,438 @@
---
sidebar_label: Seeq
title: Seeq
description: 如何使用 Seeq 和 TDengine 进行时序数据分析
---
# 如何使用 Seeq 和 TDengine 进行时序数据分析
## 方案介绍
Seeq 是制造业和工业互联网(IIOT)高级分析软件。Seeq 支持在工艺制造组织中使用机器学习创新的新功能。这些功能使组织能够将自己或第三方机器学习算法部署到前线流程工程师和主题专家使用的高级分析应用程序,从而使单个数据科学家的努力扩展到许多前线员工。
通过 TDengine Java connector Seeq 可以轻松支持查询 TDengine 提供的时序数据,并提供数据展现、分析、预测等功能。
### Seeq 安装方法
从 [Seeq 官网](https://www.seeq.com/customer-download)下载相关软件,例如 Seeq Server 和 Seeq Data Lab 等。
### Seeq Server 安装和启动
```
tar xvzf seeq-server-xxx.tar.gz
cd seeq-server-installer
sudo ./install
sudo seeq service enable
sudo seeq start
```
### Seeq Data Lab Server 安装和启动
Seeq Data Lab 需要安装在和 Seeq Server 不同的服务器上,并通过配置和 Seeq Server 互联。详细安装配置指令参见[Seeq 官方文档](https://support.seeq.com/space/KB/1034059842)。
```
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
sudo seeq config set Network/DataLab/Hostname localhost
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
# If the main Seeq server is configured to listen over HTTPS
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
# If the main Seeq server is NOT configured to listen over HTTPS
sudo seeq config set Network/Webserver/Port <value>
#On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231
```
## TDengine 本地实例安装方法
请参考[官网文档](../../get-started)。
## TDengine Cloud 访问方法
如果使用 Seeq 连接 TDengine Cloud,请在 https://cloud.taosdata.com 申请帐号并登录,查看如何访问 TDengine Cloud。
## 如何配置 Seeq 访问 TDengine
1. 查看 data 存储位置
```
sudo seeq config get Folders/Data
```
2. 从 maven.org 下载 TDengine Java connector 包,目前最新版本为[3.2.5](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.5/taos-jdbcdriver-3.2.5-dist.jar),并拷贝至 data 存储位置的 plugins\lib 中。
3. 重新启动 seeq server
```
sudo seeq restart
```
4. 输入 License
使用浏览器访问 ip:34216 并按照说明输入 license。
## 使用 Seeq 分析 TDengine 时序数据
本章节演示如何使用 Seeq 软件配合 TDengine 进行时序数据分析。
### 场景介绍
示例场景为一个电力系统,用户每天从电站仪表收集用电量数据,并将其存储在 TDengine 集群中。现在用户想要预测电力消耗将会如何发展,并购买更多设备来支持它。用户电力消耗随着每月订单变化而不同,另外考虑到季节变化,电力消耗量会有所不同。这个城市位于北半球,所以在夏天会使用更多的电力。我们模拟数据来反映这些假定。
### 数据 Schema
```
CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS (device NCHAR(20));
CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
```
![Seeq demo schema](./seeq/seeq-demo-schema.webp)
### 构造数据方法
```
python mockdata.py
taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);"
```
源代码托管在[GitHub 仓库](https://github.com/sangshuduo/td-forecasting)。
### 使用 Seeq 进行数据分析
#### 配置数据源Data Source
使用 Seeq 管理员角色的帐号登录,并新建数据源。
- Power
```
{
"QueryDefinitions": [
{
"Name": "PowerNum",
"Type": "SIGNAL",
"Sql": "SELECT ts, num FROM meters",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Num",
"Sql": null,
"Uom": "string"
},
{
"Name": "Interpolation Method",
"Value": "linear",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Interpolation",
"Value": "2day",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": null
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
- Goods
```
{
"QueryDefinitions": [
{
"Name": "PowerGoods",
"Type": "CONDITION",
"Sql": "SELECT ts1, ts2, goods FROM power.goods",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Goods",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Duration",
"Value": "10days",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": [
{
"Name": "goods",
"Value": "${columnResult}",
"Column": "goods",
"Uom": "string"
}
]
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
- Temperature
```
{
"QueryDefinitions": [
{
"Name": "PowerNum",
"Type": "SIGNAL",
"Sql": "SELECT ts, temperature FROM meters",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Temperature",
"Sql": null,
"Uom": "string"
},
{
"Name": "Interpolation Method",
"Value": "linear",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Interpolation",
"Value": "2day",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": null
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
#### 使用 Seeq Workbench
登录 Seeq 服务页面并新建 Seeq Workbench通过选择数据源搜索结果和根据需要选择不同的工具可以进行数据展现或预测详细使用方法参见[官方知识库](https://support.seeq.com/space/KB/146440193/Seeq+Workbench)。
![Seeq Workbench](./seeq/seeq-demo-workbench.webp)
#### 用 Seeq Data Lab Server 进行进一步的数据分析
登录 Seeq 服务页面并新建 Seeq Data Lab可以进一步使用 Python 编程或其他机器学习工具进行更复杂的数据挖掘功能。
```Python
from seeq import spy
spy.options.compatibility = 189
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import mlforecast
import lightgbm as lgb
from mlforecast.target_transforms import Differences
from sklearn.linear_model import LinearRegression
ds = spy.search({'ID': "8C91A9C7-B6C2-4E18-AAAF-XXXXXXXXX"})
print(ds)
sig = ds.loc[ds['Name'].isin(['Num'])]
print(sig)
data = spy.pull(sig, start='2015-01-01', end='2022-12-31', grid=None)
print("data.info()")
data.info()
print(data)
#data.plot()
print("data[Num].info()")
data['Num'].info()
da = data['Num'].index.tolist()
#print(da)
li = data['Num'].tolist()
#print(li)
data2 = pd.DataFrame()
data2['ds'] = da
print('1st data2 ds info()')
data2['ds'].info()
#data2['ds'] = pd.to_datetime(data2['ds']).to_timestamp()
data2['ds'] = pd.to_datetime(data2['ds']).astype('int64')
data2['y'] = li
print('2nd data2 ds info()')
data2['ds'].info()
print(data2)
data2.insert(0, column = "unique_id", value="unique_id")
print("Forecasting ...")
forecast = mlforecast.MLForecast(
models = lgb.LGBMRegressor(),
freq = 1,
lags=[365],
target_transforms=[Differences([365])],
)
forecast.fit(data2)
predicts = forecast.predict(365)
pd.concat([data2, predicts]).set_index("ds").plot(title = "current data with forecast")
plt.show()
```
运行程序输出结果:
![Seeq forecast result](./seeq/seeq-forecast-result.webp)
### 配置 Seeq 数据源连接 TDengine Cloud
配置 Seeq 数据源连接 TDengine Cloud 和连接 TDengine 本地安装实例没有本质的不同,只要登录 TDengine Cloud 后选择“编程 - Java”,并拷贝带 token 字符串的 JDBC URL,填写为 Seeq Data Source 的 DatabaseJdbcUrl 值即可。
注意:使用 TDengine Cloud 时,SQL 命令中需要指定数据库名称。
#### 用 TDengine Cloud 作为数据源的配置内容示例:
```
{
"QueryDefinitions": [
{
"Name": "CloudVoltage",
"Type": "SIGNAL",
"Sql": "SELECT ts, voltage FROM test.meters",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Voltage",
"Sql": null,
"Uom": "string"
},
{
"Name": "Interpolation Method",
"Value": "linear",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Interpolation",
"Value": "2day",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": null
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
#### TDengine Cloud 作为数据源的 Seeq Workbench 界面示例
![Seeq workbench with TDengine cloud](./seeq/seeq-workbench-with-tdengine-cloud.webp)
## 方案总结
通过集成 Seeq 和 TDengine,可以充分利用 TDengine 高效的存储和查询性能,同时也可以受益于 Seeq 提供给用户的强大数据可视化和分析功能。
这种集成使用户能够充分利用 TDengine 的高性能时序数据存储和检索,确保高效处理大量数据。同时,Seeq 提供高级分析功能,如数据可视化、异常检测、相关性分析和预测建模,使用户能够获得有价值的洞察,并基于数据进行决策。
综合来看,Seeq 和 TDengine 共同为制造业、工业物联网和电力系统等各行各业的时序数据分析提供了综合解决方案。高效数据存储和先进的分析相结合,赋予用户充分发挥时序数据潜力的能力,推动运营改进,并支持预测和规划分析应用。

View File

@ -43,7 +43,7 @@ int main(int argc, char *argv[])
taos_free_result(result);
// create table
const char* sql = "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10))";
const char* sql = "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10), varbin varbinary(16))";
result = taos_query(taos, sql);
code = taos_errno(result);
if (code != 0) {
@ -68,6 +68,7 @@ int main(int argc, char *argv[])
double f8;
char bin[40];
char blob[80];
int8_t varbin[16];
} v = {0};
int32_t boolLen = sizeof(int8_t);
@ -80,7 +81,7 @@ int main(int argc, char *argv[])
int32_t ncharLen = 30;
stmt = taos_stmt_init(taos);
TAOS_MULTI_BIND params[10];
TAOS_MULTI_BIND params[11];
params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
params[0].buffer_length = sizeof(v.ts);
params[0].buffer = &v.ts;
@ -152,9 +153,19 @@ int main(int argc, char *argv[])
params[9].is_null = NULL;
params[9].num = 1;
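// VARBINARY 多行绑定:buffer 存放原始字节,buffer_length 为缓冲区容量(sizeof(v.varbin) = 16 字节),
// length 指向实际写入长度 vbinLen(此处为 5 字节),因此本列只会写入 tmp 的前 5 个字节。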
int8_t tmp[16] = {'a', 0, 1, 13, '1'};
int32_t vbinLen = 5;
memcpy(v.varbin, tmp, sizeof(v.varbin));
params[10].buffer_type = TSDB_DATA_TYPE_VARBINARY;
params[10].buffer_length = sizeof(v.varbin);
params[10].buffer = v.varbin;
params[10].length = &vbinLen;
params[10].is_null = NULL;
params[10].num = 1;
char is_null = 1;
sql = "insert into m1 values(?,?,?,?,?,?,?,?,?,?)";
sql = "insert into m1 values(?,?,?,?,?,?,?,?,?,?,?)";
code = taos_stmt_prepare(stmt, sql, 0);
if (code != 0){
printf("failed to execute taos_stmt_prepare. code:0x%x\n", code);
@ -162,7 +173,7 @@ int main(int argc, char *argv[])
v.ts = 1591060628000;
for (int i = 0; i < 10; ++i) {
v.ts += 1;
for (int j = 1; j < 10; ++j) {
for (int j = 1; j < 11; ++j) {
params[j].is_null = ((i == j) ? &is_null : 0);
}
v.b = (int8_t)i % 2;
@ -216,7 +227,7 @@ int main(int argc, char *argv[])
printf("expect two rows, but %d rows are fetched\n", rows);
}
taos_free_result(result);
// taos_free_result(result);
taos_stmt_close(stmt);
return 0;

View File

@ -280,7 +280,7 @@ void consume_repeatly(tmq_t* tmq) {
code = tmq_offset_seek(tmq, topic_name, p->vgId, p->begin);
if (code != 0) {
fprintf(stderr, "failed to seek to %ld, reason:%s", p->begin, tmq_err2str(code));
fprintf(stderr, "failed to seek to %d, reason:%s", (int)p->begin, tmq_err2str(code));
}
}

View File

@ -272,7 +272,7 @@ typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end; // The last invalidate version of wal + 1
int64_t end;
} tmq_topic_assignment;
DLL_EXPORT tmq_conf_t *tmq_conf_new();
@ -305,7 +305,7 @@ DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t v
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); // Get current offset of the result
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
DLL_EXPORT const char *tmq_err2str(int32_t code);
/* ------------------------------ TAOSX -----------------------------------*/

View File

@ -179,6 +179,8 @@ int32_t getJsonValueLen(const char* data);
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData);
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, bool trimValue);
int32_t colDataCopyNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
uint32_t numOfRows, bool isNull);
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
const SColumnInfoData* pSource, int32_t numOfRow2);
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows,
@ -241,7 +243,7 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols);
const char* blockDecode(SSDataBlock* pBlock, const char* pData);
// for debug
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf, const char* taskIdStr);
int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pDataBlocks, const STSchema* pTSchema, int64_t uid, int32_t vgId,
tb_uid_t suid);

View File

@ -102,6 +102,11 @@ extern uint16_t tsMonitorPort;
extern int32_t tsMonitorMaxLogs;
extern bool tsMonitorComp;
// audit
extern bool tsEnableAudit;
extern char tsAuditFqdn[];
extern uint16_t tsAuditPort;
// telem
extern bool tsEnableTelem;
extern int32_t tsTelemInterval;
@ -130,6 +135,7 @@ extern bool tsKeepColumnName;
extern bool tsEnableQueryHb;
extern bool tsEnableScience;
extern bool tsTtlChangeOnWrite;
extern int32_t tsTtlFlushThreshold;
extern int32_t tsRedirectPeriod;
extern int32_t tsRedirectFactor;
extern int32_t tsRedirectMaxPeriod;
@ -161,6 +167,7 @@ extern char tsCompressor[];
// tfs
extern int32_t tsDiskCfgNum;
extern SDiskCfg tsDiskCfg[];
extern int64_t tsMinDiskFreeSize;
// udf
extern bool tsStartUdfd;
@ -184,8 +191,11 @@ extern int64_t tsWalFsyncDataSizeLimit;
extern int32_t tsTransPullupInterval;
extern int32_t tsMqRebalanceInterval;
extern int32_t tsStreamCheckpointTickInterval;
extern int32_t tsStreamNodeCheckInterval;
extern int32_t tsTtlUnit;
extern int32_t tsTtlPushInterval;
extern int32_t tsTtlPushIntervalSec;
extern int32_t tsTtlBatchDropNum;
extern int32_t tsTrimVDbIntervalSec;
extern int32_t tsGrantHBInterval;
extern int32_t tsUptimeInterval;
@ -194,7 +204,6 @@ extern int32_t tsRpcRetryInterval;
extern bool tsDisableStream;
extern int64_t tsStreamBufferSize;
extern int64_t tsCheckpointInterval;
extern bool tsFilterScalarMode;
extern int32_t tsKeepTimeOffset;
extern int32_t tsMaxStreamBackendCache;

View File

@ -28,6 +28,22 @@ typedef struct SCorEpSet {
} SCorEpSet;
#define GET_ACTIVE_EP(_eps) (&((_eps)->eps[(_eps)->inUse]))
#define EPSET_TO_STR(_eps, tbuf) \
do { \
int len = snprintf((tbuf), sizeof(tbuf), "epset:{"); \
for (int _i = 0; _i < (_eps)->numOfEps; _i++) { \
if (_i == (_eps)->numOfEps - 1) { \
len += \
snprintf((tbuf) + len, sizeof(tbuf) - len, "%d. %s:%d", _i, (_eps)->eps[_i].fqdn, (_eps)->eps[_i].port); \
} else { \
len += \
snprintf((tbuf) + len, sizeof(tbuf) - len, "%d. %s:%d, ", _i, (_eps)->eps[_i].fqdn, (_eps)->eps[_i].port); \
} \
} \
len += snprintf((tbuf) + len, sizeof(tbuf) - len, "}, inUse:%d", (_eps)->inUse); \
} while (0);
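// 用法示意(假设的调用场景):tbuf 需传入字符数组而非 char* 指针,因为宏内部使用 sizeof(tbuf) 计算剩余空间,例如:
//   char buf[256] = {0};
//   EPSET_TO_STR(pEpSet, buf)
//   printf("%s\n", buf);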
int32_t taosGetFqdnPortFromEp(const char* ep, SEp* pEp);
void addEpIntoEpSet(SEpSet* pEpSet, const char* fqdn, uint16_t port);

View File

@ -77,7 +77,8 @@ static inline bool tmsgIsValid(tmsg_t type) {
}
static inline bool vnodeIsMsgBlock(tmsg_t type) {
return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) ||
(type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM) || (type == TDMT_VND_COMMIT);
(type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM) || (type == TDMT_VND_COMMIT) ||
(type == TDMT_SYNC_CONFIG_CHANGE);
}
static inline bool syncUtilUserCommit(tmsg_t msgType) {
@ -212,6 +213,215 @@ typedef enum _mgmt_table {
#define TD_REQ_FROM_APP 0
#define TD_REQ_FROM_TAOX 1
typedef enum ENodeType {
// Syntax nodes are used in parser and planner module, and some are also used in executor module, such as COLUMN,
// VALUE, OPERATOR, FUNCTION and so on.
QUERY_NODE_COLUMN = 1,
QUERY_NODE_VALUE,
QUERY_NODE_OPERATOR,
QUERY_NODE_LOGIC_CONDITION,
QUERY_NODE_FUNCTION,
QUERY_NODE_REAL_TABLE,
QUERY_NODE_TEMP_TABLE,
QUERY_NODE_JOIN_TABLE,
QUERY_NODE_GROUPING_SET,
QUERY_NODE_ORDER_BY_EXPR,
QUERY_NODE_LIMIT,
QUERY_NODE_STATE_WINDOW,
QUERY_NODE_SESSION_WINDOW,
QUERY_NODE_INTERVAL_WINDOW,
QUERY_NODE_NODE_LIST,
QUERY_NODE_FILL,
QUERY_NODE_RAW_EXPR, // Only be used in parser module.
QUERY_NODE_TARGET,
QUERY_NODE_DATABLOCK_DESC,
QUERY_NODE_SLOT_DESC,
QUERY_NODE_COLUMN_DEF,
QUERY_NODE_DOWNSTREAM_SOURCE,
QUERY_NODE_DATABASE_OPTIONS,
QUERY_NODE_TABLE_OPTIONS,
QUERY_NODE_INDEX_OPTIONS,
QUERY_NODE_EXPLAIN_OPTIONS,
QUERY_NODE_STREAM_OPTIONS,
QUERY_NODE_LEFT_VALUE,
QUERY_NODE_COLUMN_REF,
QUERY_NODE_WHEN_THEN,
QUERY_NODE_CASE_WHEN,
QUERY_NODE_EVENT_WINDOW,
QUERY_NODE_HINT,
// Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR = 100,
QUERY_NODE_SELECT_STMT,
QUERY_NODE_VNODE_MODIFY_STMT,
QUERY_NODE_CREATE_DATABASE_STMT,
QUERY_NODE_DROP_DATABASE_STMT,
QUERY_NODE_ALTER_DATABASE_STMT,
QUERY_NODE_FLUSH_DATABASE_STMT,
QUERY_NODE_TRIM_DATABASE_STMT,
QUERY_NODE_CREATE_TABLE_STMT,
QUERY_NODE_CREATE_SUBTABLE_CLAUSE,
QUERY_NODE_CREATE_MULTI_TABLES_STMT,
QUERY_NODE_DROP_TABLE_CLAUSE,
QUERY_NODE_DROP_TABLE_STMT,
QUERY_NODE_DROP_SUPER_TABLE_STMT,
QUERY_NODE_ALTER_TABLE_STMT,
QUERY_NODE_ALTER_SUPER_TABLE_STMT,
QUERY_NODE_CREATE_USER_STMT,
QUERY_NODE_ALTER_USER_STMT,
QUERY_NODE_DROP_USER_STMT,
QUERY_NODE_USE_DATABASE_STMT,
QUERY_NODE_CREATE_DNODE_STMT,
QUERY_NODE_DROP_DNODE_STMT,
QUERY_NODE_ALTER_DNODE_STMT,
QUERY_NODE_CREATE_INDEX_STMT,
QUERY_NODE_DROP_INDEX_STMT,
QUERY_NODE_CREATE_QNODE_STMT,
QUERY_NODE_DROP_QNODE_STMT,
QUERY_NODE_CREATE_BNODE_STMT,
QUERY_NODE_DROP_BNODE_STMT,
QUERY_NODE_CREATE_SNODE_STMT,
QUERY_NODE_DROP_SNODE_STMT,
QUERY_NODE_CREATE_MNODE_STMT,
QUERY_NODE_DROP_MNODE_STMT,
QUERY_NODE_CREATE_TOPIC_STMT,
QUERY_NODE_DROP_TOPIC_STMT,
QUERY_NODE_DROP_CGROUP_STMT,
QUERY_NODE_ALTER_LOCAL_STMT,
QUERY_NODE_EXPLAIN_STMT,
QUERY_NODE_DESCRIBE_STMT,
QUERY_NODE_RESET_QUERY_CACHE_STMT,
QUERY_NODE_COMPACT_DATABASE_STMT,
QUERY_NODE_CREATE_FUNCTION_STMT,
QUERY_NODE_DROP_FUNCTION_STMT,
QUERY_NODE_CREATE_STREAM_STMT,
QUERY_NODE_DROP_STREAM_STMT,
QUERY_NODE_BALANCE_VGROUP_STMT,
QUERY_NODE_MERGE_VGROUP_STMT,
QUERY_NODE_REDISTRIBUTE_VGROUP_STMT,
QUERY_NODE_SPLIT_VGROUP_STMT,
QUERY_NODE_SYNCDB_STMT,
QUERY_NODE_GRANT_STMT,
QUERY_NODE_REVOKE_STMT,
QUERY_NODE_SHOW_DNODES_STMT,
QUERY_NODE_SHOW_MNODES_STMT,
QUERY_NODE_SHOW_MODULES_STMT,
QUERY_NODE_SHOW_QNODES_STMT,
QUERY_NODE_SHOW_SNODES_STMT,
QUERY_NODE_SHOW_BNODES_STMT,
QUERY_NODE_SHOW_CLUSTER_STMT,
QUERY_NODE_SHOW_DATABASES_STMT,
QUERY_NODE_SHOW_FUNCTIONS_STMT,
QUERY_NODE_SHOW_INDEXES_STMT,
QUERY_NODE_SHOW_STABLES_STMT,
QUERY_NODE_SHOW_STREAMS_STMT,
QUERY_NODE_SHOW_TABLES_STMT,
QUERY_NODE_SHOW_TAGS_STMT,
QUERY_NODE_SHOW_USERS_STMT,
QUERY_NODE_SHOW_LICENCES_STMT,
QUERY_NODE_SHOW_VGROUPS_STMT,
QUERY_NODE_SHOW_TOPICS_STMT,
QUERY_NODE_SHOW_CONSUMERS_STMT,
QUERY_NODE_SHOW_CONNECTIONS_STMT,
QUERY_NODE_SHOW_QUERIES_STMT,
QUERY_NODE_SHOW_APPS_STMT,
QUERY_NODE_SHOW_VARIABLES_STMT,
QUERY_NODE_SHOW_DNODE_VARIABLES_STMT,
QUERY_NODE_SHOW_TRANSACTIONS_STMT,
QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
QUERY_NODE_SHOW_VNODES_STMT,
QUERY_NODE_SHOW_USER_PRIVILEGES_STMT,
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
QUERY_NODE_SHOW_CREATE_STABLE_STMT,
QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
QUERY_NODE_SHOW_SCORES_STMT,
QUERY_NODE_SHOW_TABLE_TAGS_STMT,
QUERY_NODE_KILL_CONNECTION_STMT,
QUERY_NODE_KILL_QUERY_STMT,
QUERY_NODE_KILL_TRANSACTION_STMT,
QUERY_NODE_DELETE_STMT,
QUERY_NODE_INSERT_STMT,
QUERY_NODE_QUERY,
QUERY_NODE_SHOW_DB_ALIVE_STMT,
QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT,
QUERY_NODE_BALANCE_VGROUP_LEADER_STMT,
QUERY_NODE_RESTORE_DNODE_STMT,
QUERY_NODE_RESTORE_QNODE_STMT,
QUERY_NODE_RESTORE_MNODE_STMT,
QUERY_NODE_RESTORE_VNODE_STMT,
QUERY_NODE_PAUSE_STREAM_STMT,
QUERY_NODE_RESUME_STREAM_STMT,
// logic plan node
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
QUERY_NODE_LOGIC_PLAN_JOIN,
QUERY_NODE_LOGIC_PLAN_AGG,
QUERY_NODE_LOGIC_PLAN_PROJECT,
QUERY_NODE_LOGIC_PLAN_VNODE_MODIFY,
QUERY_NODE_LOGIC_PLAN_EXCHANGE,
QUERY_NODE_LOGIC_PLAN_MERGE,
QUERY_NODE_LOGIC_PLAN_WINDOW,
QUERY_NODE_LOGIC_PLAN_FILL,
QUERY_NODE_LOGIC_PLAN_SORT,
QUERY_NODE_LOGIC_PLAN_PARTITION,
QUERY_NODE_LOGIC_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_LOGIC_PLAN_INTERP_FUNC,
QUERY_NODE_LOGIC_SUBPLAN,
QUERY_NODE_LOGIC_PLAN,
QUERY_NODE_LOGIC_PLAN_GROUP_CACHE,
QUERY_NODE_LOGIC_PLAN_DYN_QUERY_CTRL,
// physical plan node
QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN = 1100,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN,
QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN,
QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN,
QUERY_NODE_PHYSICAL_PLAN_PROJECT,
QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN,
QUERY_NODE_PHYSICAL_PLAN_HASH_AGG,
QUERY_NODE_PHYSICAL_PLAN_EXCHANGE,
QUERY_NODE_PHYSICAL_PLAN_MERGE,
QUERY_NODE_PHYSICAL_PLAN_SORT,
QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT,
QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_FILL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION,
QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE,
QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE,
QUERY_NODE_PHYSICAL_PLAN_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC,
QUERY_NODE_PHYSICAL_PLAN_DISPATCH,
QUERY_NODE_PHYSICAL_PLAN_INSERT,
QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT,
QUERY_NODE_PHYSICAL_PLAN_DELETE,
QUERY_NODE_PHYSICAL_SUBPLAN,
QUERY_NODE_PHYSICAL_PLAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN,
QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT,
QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN,
QUERY_NODE_PHYSICAL_PLAN_GROUP_CACHE,
QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL
} ENodeType;
typedef struct {
int32_t vgId;
char* dbFName;
@ -741,6 +951,10 @@ typedef struct STimeWindow {
TSKEY ekey;
} STimeWindow;
typedef struct SQueryHint {
bool batchScan;
} SQueryHint;
typedef struct {
int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed
int32_t tsLen; // total length of ts comp block
@ -759,12 +973,18 @@ typedef struct {
int64_t offset;
} SInterval;
typedef struct {
int32_t code;
typedef struct STbVerInfo {
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t sversion;
int32_t tversion;
} STbVerInfo;
typedef struct {
int32_t code;
int64_t affectedRows;
SArray* tbVerInfo; // STbVerInfo
} SQueryTableRsp;
int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
@ -939,6 +1159,9 @@ int32_t tDeserializeSVTrimDbReq(void* buf, int32_t bufLen, SVTrimDbReq* pReq);
typedef struct {
int32_t timestampSec;
int32_t ttlDropMaxCount;
int32_t nUids;
SArray* pTbUids;
} SVDropTtlTableReq;
int32_t tSerializeSVDropTtlTableReq(void* buf, int32_t bufLen, SVDropTtlTableReq* pReq);
@ -1163,6 +1386,9 @@ typedef struct {
int32_t vgId;
int8_t syncState;
int8_t syncRestore;
int64_t syncTerm;
int64_t roleTimeMs;
int64_t startTimeMs;
int8_t syncCanRead;
int64_t cacheUsage;
int64_t numOfTables;
@ -1176,12 +1402,13 @@ typedef struct {
int64_t numOfBatchInsertReqs;
int64_t numOfBatchInsertSuccessReqs;
int32_t numOfCachedTables;
int32_t learnerProgress; // use one reservered
} SVnodeLoad;
typedef struct {
int8_t syncState;
int8_t syncRestore;
int8_t syncState;
int64_t syncTerm;
int8_t syncRestore;
int64_t roleTimeMs;
} SMnodeLoad;
@ -1191,6 +1418,7 @@ typedef struct {
int64_t numOfProcessedCQuery;
int64_t numOfProcessedFetch;
int64_t numOfProcessedDrop;
int64_t numOfProcessedNotify;
int64_t numOfProcessedHb;
int64_t numOfProcessedDelete;
int64_t cacheDataSize;
@ -1317,6 +1545,7 @@ typedef struct {
int8_t learnerReplica;
int8_t learnerSelfIndex;
SReplica learnerReplicas[TSDB_MAX_LEARNER_REPLICA];
int32_t changeVersion;
} SCreateVnodeReq;
int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq);
@ -1391,7 +1620,8 @@ typedef struct {
int8_t learnerSelfIndex;
int8_t learnerReplica;
SReplica learnerReplicas[TSDB_MAX_LEARNER_REPLICA];
} SAlterVnodeReplicaReq, SAlterVnodeTypeReq;
int32_t changeVersion;
} SAlterVnodeReplicaReq, SAlterVnodeTypeReq, SCheckLearnCatchupReq;
int32_t tSerializeSAlterVnodeReplicaReq(void* buf, int32_t bufLen, SAlterVnodeReplicaReq* pReq);
int32_t tDeserializeSAlterVnodeReplicaReq(void* buf, int32_t bufLen, SAlterVnodeReplicaReq* pReq);
@ -1409,7 +1639,8 @@ typedef struct {
int32_t dstVgId;
uint32_t hashBegin;
uint32_t hashEnd;
int64_t reserved;
int32_t changeVersion;
int32_t reserved;
} SAlterVnodeHashRangeReq;
int32_t tSerializeSAlterVnodeHashRangeReq(void* buf, int32_t bufLen, SAlterVnodeHashRangeReq* pReq);
@ -1831,12 +2062,26 @@ typedef struct {
int32_t tversion;
} SResReadyRsp;
typedef struct SOperatorParam {
int32_t opType;
int32_t downstreamIdx;
void* value;
SArray* pChildren; //SArray<SOperatorParam*>
} SOperatorParam;
typedef struct STableScanOperatorParam {
bool tableSeq;
SArray* pUidList;
} STableScanOperatorParam;
typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t taskId;
int32_t execId;
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t taskId;
int32_t execId;
SOperatorParam* pOpParam;
} SResFetchReq;
int32_t tSerializeSResFetchReq(void* buf, int32_t bufLen, SResFetchReq* pReq);
@ -1915,8 +2160,24 @@ typedef struct {
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
typedef enum {
TASK_NOTIFY_FINISHED = 1,
} ETaskNotifyType;
typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t taskId;
int64_t refId;
int32_t execId;
ETaskNotifyType type;
} STaskNotifyReq;
int32_t tSerializeSTaskNotifyReq(void* buf, int32_t bufLen, STaskNotifyReq* pReq);
int32_t tDeserializeSTaskNotifyReq(void* buf, int32_t bufLen, STaskNotifyReq* pReq);
int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
int32_t tDeserializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);

View File

@ -65,7 +65,7 @@ enum {
#define TD_NEW_MSG_SEG(TYPE) TYPE = ((TYPE##_SEG_CODE) << 8),
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) TYPE, TYPE##_RSP,
enum {
enum { // WARN: new msg should be appended to segment tail
#endif
TD_NEW_MSG_SEG(TDMT_DND_MSG)
TD_DEF_MSG_TYPE(TDMT_DND_CREATE_MNODE, "dnode-create-mnode", NULL, NULL)
@ -85,18 +85,19 @@ enum {
TD_DEF_MSG_TYPE(TDMT_DND_MAX_MSG, "dnd-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_ALTER_MNODE_TYPE, "dnode-alter-mnode-type", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_ALTER_VNODE_TYPE, "dnode-alter-vnode-type", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP, "dnode-check-vnode-learner-catchup", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_MND_MSG)
TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_ACCT, "create-acct", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_ACCT, "create-acct", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_ACCT, "alter-acct", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DROP_ACCT, "drop-acct", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_USER, "create-user", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_USER, "create-user", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_USER, "alter-user", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DROP_USER, "drop-user", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GET_USER_AUTH, "get-user-auth", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DROP_USER, "drop-user", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GET_USER_AUTH, "get-user-auth", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_DNODE, "create-dnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CONFIG_DNODE, "config-dnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CONFIG_DNODE, "config-dnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DROP_DNODE, "drop-dnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_MNODE, "create-mnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_MNODE, "alter-mnode", NULL, NULL)
@ -156,6 +157,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_TRANS_TIMER, "trans-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TTL_TIMER, "ttl-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GRANT_HB_TIMER, "grant-hb-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_NODECHECK_TIMER, "node-check-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_KILL_TRANS, "kill-trans", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_KILL_QUERY, "kill-query", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_KILL_CONN, "kill-conn", NULL, NULL)
@ -174,13 +176,17 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_UPTIME_TIMER, "uptime-timer", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TMQ_LOST_CONSUMER_CLEAR, "lost-consumer-clear", NULL, NULL)
// TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL)
// TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_HEARTBEAT, "stream-heartbeat", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_BALANCE_VGROUP_LEADER, "balance-vgroup-leader", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_RESTORE_DNODE, "restore-dnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_PAUSE_STREAM, "pause-stream", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_RESUME_STREAM, "resume-stream", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_CHECKPOINT_TIMER, "stream-checkpoint-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_STREAM_NODECHANGE_CHECK, "stream-nodechange-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TRIM_DB_TIMER, "trim-db-tmr", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_MSG)
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
@ -242,6 +248,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_SCH_DROP_TASK, "drop-task", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SCH_EXPLAIN, "explain", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SCH_LINK_BROKEN, "link-broken", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SCH_TASK_NOTIFY, "task-notify", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SCH_MAX_MSG, "sch-max", NULL, NULL)
@ -252,15 +259,13 @@ enum {
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_DISPATCH, "stream-task-dispatch", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_UNUSED1, "stream-unused1", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_SCAN_HISTORY, "stream-scan-history", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_SCAN_HISTORY_FINISH, "stream-scan-history-finish", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECK, "stream-task-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECKPOINT, "stream-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECKPOINT_READY, "stream-checkpoint-ready", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_REPORT_CHECKPOINT, "stream-report-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESTORE_CHECKPOINT, "stream-restore-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_PAUSE, "stream-task-pause", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_RESUME, "stream-task-resume", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_STOP, "stream-task-stop", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_MON_MSG)
TD_DEF_MSG_TYPE(TDMT_MON_MAX_MSG, "monitor-max", NULL, NULL)
@ -295,11 +300,14 @@ enum {
TD_DEF_MSG_TYPE(TDMT_SYNC_PRE_SNAPSHOT_REPLY, "sync-pre-snapshot-reply", NULL, NULL) // no longer used
TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
// TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY, "vnode-stream-scan-history", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY_FINISH, "vnode-stream-scan-history-finish", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_CHECK_POINT_SOURCE, "vnode-stream-checkpoint-source", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_UPDATE, "vnode-stream-update", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TASK_CHECK, "vnode-stream-task-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_MAX_MSG, "vnd-stream-max", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_TMQ_MSG)

View File

@ -276,91 +276,96 @@
#define TK_JOIN 258
#define TK_INNER 259
#define TK_SELECT 260
#define TK_DISTINCT 261
#define TK_WHERE 262
#define TK_PARTITION 263
#define TK_BY 264
#define TK_SESSION 265
#define TK_STATE_WINDOW 266
#define TK_EVENT_WINDOW 267
#define TK_SLIDING 268
#define TK_FILL 269
#define TK_VALUE 270
#define TK_VALUE_F 271
#define TK_NONE 272
#define TK_PREV 273
#define TK_NULL_F 274
#define TK_LINEAR 275
#define TK_NEXT 276
#define TK_HAVING 277
#define TK_RANGE 278
#define TK_EVERY 279
#define TK_ORDER 280
#define TK_SLIMIT 281
#define TK_SOFFSET 282
#define TK_LIMIT 283
#define TK_OFFSET 284
#define TK_ASC 285
#define TK_NULLS 286
#define TK_ABORT 287
#define TK_AFTER 288
#define TK_ATTACH 289
#define TK_BEFORE 290
#define TK_BEGIN 291
#define TK_BITAND 292
#define TK_BITNOT 293
#define TK_BITOR 294
#define TK_BLOCKS 295
#define TK_CHANGE 296
#define TK_COMMA 297
#define TK_CONCAT 298
#define TK_CONFLICT 299
#define TK_COPY 300
#define TK_DEFERRED 301
#define TK_DELIMITERS 302
#define TK_DETACH 303
#define TK_DIVIDE 304
#define TK_DOT 305
#define TK_EACH 306
#define TK_FAIL 307
#define TK_FILE 308
#define TK_FOR 309
#define TK_GLOB 310
#define TK_ID 311
#define TK_IMMEDIATE 312
#define TK_IMPORT 313
#define TK_INITIALLY 314
#define TK_INSTEAD 315
#define TK_ISNULL 316
#define TK_KEY 317
#define TK_MODULES 318
#define TK_NK_BITNOT 319
#define TK_NK_SEMI 320
#define TK_NOTNULL 321
#define TK_OF 322
#define TK_PLUS 323
#define TK_PRIVILEGE 324
#define TK_RAISE 325
#define TK_RESTRICT 326
#define TK_ROW 327
#define TK_SEMI 328
#define TK_STAR 329
#define TK_STATEMENT 330
#define TK_STRICT 331
#define TK_STRING 332
#define TK_TIMES 333
#define TK_VALUES 334
#define TK_VARIABLE 335
#define TK_VIEW 336
#define TK_WAL 337
#define TK_NK_HINT 261
#define TK_DISTINCT 262
#define TK_WHERE 263
#define TK_PARTITION 264
#define TK_BY 265
#define TK_SESSION 266
#define TK_STATE_WINDOW 267
#define TK_EVENT_WINDOW 268
#define TK_SLIDING 269
#define TK_FILL 270
#define TK_VALUE 271
#define TK_VALUE_F 272
#define TK_NONE 273
#define TK_PREV 274
#define TK_NULL_F 275
#define TK_LINEAR 276
#define TK_NEXT 277
#define TK_HAVING 278
#define TK_RANGE 279
#define TK_EVERY 280
#define TK_ORDER 281
#define TK_SLIMIT 282
#define TK_SOFFSET 283
#define TK_LIMIT 284
#define TK_OFFSET 285
#define TK_ASC 286
#define TK_NULLS 287
#define TK_ABORT 288
#define TK_AFTER 289
#define TK_ATTACH 290
#define TK_BEFORE 291
#define TK_BEGIN 292
#define TK_BITAND 293
#define TK_BITNOT 294
#define TK_BITOR 295
#define TK_BLOCKS 296
#define TK_CHANGE 297
#define TK_COMMA 298
#define TK_CONCAT 299
#define TK_CONFLICT 300
#define TK_COPY 301
#define TK_DEFERRED 302
#define TK_DELIMITERS 303
#define TK_DETACH 304
#define TK_DIVIDE 305
#define TK_DOT 306
#define TK_EACH 307
#define TK_FAIL 308
#define TK_FILE 309
#define TK_FOR 310
#define TK_GLOB 311
#define TK_ID 312
#define TK_IMMEDIATE 313
#define TK_IMPORT 314
#define TK_INITIALLY 315
#define TK_INSTEAD 316
#define TK_ISNULL 317
#define TK_KEY 318
#define TK_MODULES 319
#define TK_NK_BITNOT 320
#define TK_NK_SEMI 321
#define TK_NOTNULL 322
#define TK_OF 323
#define TK_PLUS 324
#define TK_PRIVILEGE 325
#define TK_RAISE 326
#define TK_RESTRICT 327
#define TK_ROW 328
#define TK_SEMI 329
#define TK_STAR 330
#define TK_STATEMENT 331
#define TK_STRICT 332
#define TK_STRING 333
#define TK_TIMES 334
#define TK_VALUES 335
#define TK_VARIABLE 336
#define TK_VIEW 337
#define TK_WAL 338
#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601
#define TK_NK_ILLEGAL 602
#define TK_NK_HEX 603 // hex number 0x123
#define TK_NK_OCT 604 // oct number
#define TK_NK_BIN 605 // bin format data 0b111
#define TK_BATCH_SCAN 606
#define TK_NO_BATCH_SCAN 607
#define TK_NK_NIL 65535

View File

@ -269,8 +269,8 @@ typedef struct {
(IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))
#define IS_VAR_DATA_TYPE(t) \
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR))
#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
#define IS_VALID_SMALLINT(_t) ((_t) >= INT16_MIN && (_t) <= INT16_MAX)

View File

@ -0,0 +1,48 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_AUDIT_H_
#define _TD_AUDIT_H_
#include "tarray.h"
#include "tdef.h"
#include "tlog.h"
#include "tmsg.h"
#include "tjson.h"
#include "tmsgcb.h"
#include "trpc.h"
#include "mnode.h"
#ifdef __cplusplus
extern "C" {
#endif
#define AUDIT_DETAIL_MAX 16000
typedef struct {
const char *server;
uint16_t port;
bool comp;
} SAuditCfg;
int32_t auditInit(const SAuditCfg *pCfg);
void auditSend(SJson *pJson);
void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, char *detail);
#ifdef __cplusplus
}
#endif
#endif /*_TD_AUDIT_H_*/
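A hedged sketch of wiring up this audit interface from a server module; the endpoint, the 0-on-success return convention, and the operation strings are assumptions, not taken from this header:
SAuditCfg cfg = {.server = "audit.example.com", .port = 6043, .comp = false};  // illustrative endpoint
if (auditInit(&cfg) == 0) {
  // record a DDL operation for the current cluster; pReq and clusterId come from the caller
  auditRecord(pReq, clusterId, "createStable", "db1", "stb1", "create stable db1.stb1 (...)");
}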

View File

@ -97,6 +97,8 @@ int32_t dsPutDataBlock(DataSinkHandle handle, const SInputData* pInput, bool* pC
void dsEndPut(DataSinkHandle handle, uint64_t useconds);
void dsReset(DataSinkHandle handle);
/**
* Get the length of the data returned by the next call to dsGetDataBlock.
* @param handle

View File

@ -41,23 +41,21 @@ typedef struct {
} SLocalFetch;
typedef struct {
void* tqReader;
void* config;
void* vnode;
void* mnd;
SMsgCb* pMsgCb;
int64_t version;
bool initMetaReader;
bool initTableReader;
bool initTqReader;
int32_t numOfVgroups;
void* sContext; // SSnapContext*
void* tqReader; // todo remove it
void* vnode;
void* mnd;
SMsgCb* pMsgCb;
int64_t version;
uint64_t checkpointId;
bool initTableReader;
bool initTqReader;
int32_t numOfVgroups;
void* sContext; // SSnapContext*
void* pStateBackend;
int8_t fillHistory;
STimeWindow winRange;
void* pStateBackend;
struct SStorageAPI api;
int8_t fillHistory;
STimeWindow winRange;
} SReadHandle;
// in queue mode, data streams are separated by msg
@ -97,9 +95,6 @@ void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId);
int32_t qSetStreamOpOpen(qTaskInfo_t tinfo);
// todo refactor
void qGetCheckpointVersion(qTaskInfo_t tinfo, int64_t* dataVer, int64_t* ckId);
/**
* Set multiple input data blocks for the stream scan.
* @param tinfo
@ -130,6 +125,10 @@ int32_t qSetSMAInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks,
*/
int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd);
bool qIsDynamicExecTask(qTaskInfo_t tinfo);
void qUpdateOperatorParam(qTaskInfo_t tinfo, void* pParam);
/**
* Create the exec task object according to task json
* @param readHandle
@ -150,7 +149,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
* @return
*/
int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion,
int32_t* tversion);
int32_t* tversion, int32_t idx);
/**
* The main task execution function, covering queries on both a single table and multiple tables,

View File

@ -98,6 +98,18 @@ typedef struct SMTbCursor {
int8_t paused;
} SMTbCursor;
typedef struct SMCtbCursor {
SMeta *pMeta;
void *pCur;
tb_uid_t suid;
void *pKey;
void *pVal;
int kLen;
int vLen;
int8_t paused;
int lock;
} SMCtbCursor;
typedef struct SRowBuffPos {
void* pRowBuff;
void* pKey;
@ -278,13 +290,17 @@ typedef struct SStoreMeta {
void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables,
int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) &
// metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta);
int64_t (*getNumOfRowsInMem)(void* pVnode);
/**
int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list);
int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg);
int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list);
*/
SMCtbCursor* (*openCtbCursor)(void *pVnode, tb_uid_t uid, int lock);
int32_t (*resumeCtbCursor)(SMCtbCursor* pCtbCur, int8_t first);
void (*pauseCtbCursor)(SMCtbCursor* pCtbCur);
void (*closeCtbCursor)(SMCtbCursor *pCtbCur);
tb_uid_t (*ctbCursorNext)(SMCtbCursor* pCur);
} SStoreMeta;
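The child-table cursor callbacks added above can be driven as in this sketch, assuming a populated SStoreMeta vtable, a valid vnode handle, and that ctbCursorNext returns 0 once the scan is exhausted:
SMCtbCursor *pCur = pMeta->openCtbCursor(pVnode, suid, 1 /*lock*/);
if (pCur != NULL) {
  tb_uid_t uid;
  while ((uid = pMeta->ctbCursorNext(pCur)) != 0) {
    // collect the child-table uid, e.g. taosArrayPush(pList, &uid);
  }
  pMeta->closeCtbCursor(pCur);
}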
typedef struct SStoreMetaReader {
@ -363,7 +379,7 @@ typedef struct SStateStore {
state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
int32_t (*streamStateSessionGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark);
SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark, bool igUp);
TSKEY (*updateInfoFillBlockData)(SUpdateInfo* pInfo, SSDataBlock* pBlock, int32_t primaryTsCol);
bool (*updateInfoIsUpdated)(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts);
bool (*updateInfoIsTableInserted)(SUpdateInfo* pInfo, int64_t tbUid);
@ -371,7 +387,7 @@ typedef struct SStateStore {
void (*windowSBfDelete)(SUpdateInfo *pInfo, uint64_t count);
void (*windowSBfAdd)(SUpdateInfo *pInfo, uint64_t count);
SUpdateInfo* (*updateInfoInitP)(SInterval* pInterval, int64_t watermark);
SUpdateInfo* (*updateInfoInitP)(SInterval* pInterval, int64_t watermark, bool igUp);
void (*updateInfoAddCloseWindowSBF)(SUpdateInfo* pInfo);
void (*updateInfoDestoryColseWinSBF)(SUpdateInfo* pInfo);
int32_t (*updateInfoSerialize)(void* buf, int32_t bufLen, const SUpdateInfo* pInfo);
@ -382,7 +398,8 @@ typedef struct SStateStore {
SStreamStateCur* (*streamStateSessionSeekKeyCurrentNext)(SStreamState* pState, const SSessionKey* key);
struct SStreamFileState* (*streamFileStateInit)(int64_t memSize, uint32_t keySize, uint32_t rowSize,
uint32_t selectRowSize, GetTsFun fp, void* pFile, TSKEY delMark, const char*id);
uint32_t selectRowSize, GetTsFun fp, void* pFile, TSKEY delMark,
const char* id, int64_t ckId);
void (*streamFileStateDestroy)(struct SStreamFileState* pFileState);
void (*streamFileStateClear)(struct SStreamFileState* pFileState);

View File

@ -122,6 +122,8 @@ typedef enum EFunctionType {
FUNCTION_TYPE_IROWTS,
FUNCTION_TYPE_ISFILLED,
FUNCTION_TYPE_TAGS,
FUNCTION_TYPE_TBUID,
FUNCTION_TYPE_VGID,
// internal function
FUNCTION_TYPE_SELECT_VALUE = 3750,
@ -157,6 +159,8 @@ typedef enum EFunctionType {
FUNCTION_TYPE_AVG_MERGE,
FUNCTION_TYPE_STDDEV_PARTIAL,
FUNCTION_TYPE_STDDEV_MERGE,
FUNCTION_TYPE_IRATE_PARTIAL,
FUNCTION_TYPE_IRATE_MERGE,
// geometry functions
FUNCTION_TYPE_GEOM_FROM_TEXT = 4250,
@ -231,6 +235,7 @@ bool fmIsCumulativeFunc(int32_t funcId);
bool fmIsInterpPseudoColumnFunc(int32_t funcId);
bool fmIsGroupKeyFunc(int32_t funcId);
bool fmIsBlockDistFunc(int32_t funcId);
bool fmIsConstantResFunc(SFunctionNode* pFunc);
void getLastCacheDataType(SDataType* pType);
SFunctionNode* createFunction(const char* pName, SNodeList* pParameterList);

View File

@ -109,8 +109,8 @@ typedef uint16_t VarDataLenT; // maxVarDataLen: 65535
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
#define IS_VAR_DATA_TYPE(t) \
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR))
static FORCE_INLINE char *udfColDataGetData(const SUdfColumn *pColumn, int32_t row) {
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {

View File

@ -21,6 +21,7 @@ extern "C" {
#endif
#include "tdef.h"
#include "tmsg.h"
#define nodeType(nodeptr) (((const SNode*)(nodeptr))->type)
#define setNodeType(nodeptr, nodetype) (((SNode*)(nodeptr))->type = (nodetype))
@ -78,208 +79,6 @@ extern "C" {
(list) = NULL; \
} while (0)
typedef enum ENodeType {
// Syntax nodes are used in parser and planner module, and some are also used in executor module, such as COLUMN,
// VALUE, OPERATOR, FUNCTION and so on.
QUERY_NODE_COLUMN = 1,
QUERY_NODE_VALUE,
QUERY_NODE_OPERATOR,
QUERY_NODE_LOGIC_CONDITION,
QUERY_NODE_FUNCTION,
QUERY_NODE_REAL_TABLE,
QUERY_NODE_TEMP_TABLE,
QUERY_NODE_JOIN_TABLE,
QUERY_NODE_GROUPING_SET,
QUERY_NODE_ORDER_BY_EXPR,
QUERY_NODE_LIMIT,
QUERY_NODE_STATE_WINDOW,
QUERY_NODE_SESSION_WINDOW,
QUERY_NODE_INTERVAL_WINDOW,
QUERY_NODE_NODE_LIST,
QUERY_NODE_FILL,
QUERY_NODE_RAW_EXPR, // Only be used in parser module.
QUERY_NODE_TARGET,
QUERY_NODE_DATABLOCK_DESC,
QUERY_NODE_SLOT_DESC,
QUERY_NODE_COLUMN_DEF,
QUERY_NODE_DOWNSTREAM_SOURCE,
QUERY_NODE_DATABASE_OPTIONS,
QUERY_NODE_TABLE_OPTIONS,
QUERY_NODE_INDEX_OPTIONS,
QUERY_NODE_EXPLAIN_OPTIONS,
QUERY_NODE_STREAM_OPTIONS,
QUERY_NODE_LEFT_VALUE,
QUERY_NODE_COLUMN_REF,
QUERY_NODE_WHEN_THEN,
QUERY_NODE_CASE_WHEN,
QUERY_NODE_EVENT_WINDOW,
// Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR = 100,
QUERY_NODE_SELECT_STMT,
QUERY_NODE_VNODE_MODIFY_STMT,
QUERY_NODE_CREATE_DATABASE_STMT,
QUERY_NODE_DROP_DATABASE_STMT,
QUERY_NODE_ALTER_DATABASE_STMT,
QUERY_NODE_FLUSH_DATABASE_STMT,
QUERY_NODE_TRIM_DATABASE_STMT,
QUERY_NODE_CREATE_TABLE_STMT,
QUERY_NODE_CREATE_SUBTABLE_CLAUSE,
QUERY_NODE_CREATE_MULTI_TABLES_STMT,
QUERY_NODE_DROP_TABLE_CLAUSE,
QUERY_NODE_DROP_TABLE_STMT,
QUERY_NODE_DROP_SUPER_TABLE_STMT,
QUERY_NODE_ALTER_TABLE_STMT,
QUERY_NODE_ALTER_SUPER_TABLE_STMT,
QUERY_NODE_CREATE_USER_STMT,
QUERY_NODE_ALTER_USER_STMT,
QUERY_NODE_DROP_USER_STMT,
QUERY_NODE_USE_DATABASE_STMT,
QUERY_NODE_CREATE_DNODE_STMT,
QUERY_NODE_DROP_DNODE_STMT,
QUERY_NODE_ALTER_DNODE_STMT,
QUERY_NODE_CREATE_INDEX_STMT,
QUERY_NODE_DROP_INDEX_STMT,
QUERY_NODE_CREATE_QNODE_STMT,
QUERY_NODE_DROP_QNODE_STMT,
QUERY_NODE_CREATE_BNODE_STMT,
QUERY_NODE_DROP_BNODE_STMT,
QUERY_NODE_CREATE_SNODE_STMT,
QUERY_NODE_DROP_SNODE_STMT,
QUERY_NODE_CREATE_MNODE_STMT,
QUERY_NODE_DROP_MNODE_STMT,
QUERY_NODE_CREATE_TOPIC_STMT,
QUERY_NODE_DROP_TOPIC_STMT,
QUERY_NODE_DROP_CGROUP_STMT,
QUERY_NODE_ALTER_LOCAL_STMT,
QUERY_NODE_EXPLAIN_STMT,
QUERY_NODE_DESCRIBE_STMT,
QUERY_NODE_RESET_QUERY_CACHE_STMT,
QUERY_NODE_COMPACT_DATABASE_STMT,
QUERY_NODE_CREATE_FUNCTION_STMT,
QUERY_NODE_DROP_FUNCTION_STMT,
QUERY_NODE_CREATE_STREAM_STMT,
QUERY_NODE_DROP_STREAM_STMT,
QUERY_NODE_BALANCE_VGROUP_STMT,
QUERY_NODE_MERGE_VGROUP_STMT,
QUERY_NODE_REDISTRIBUTE_VGROUP_STMT,
QUERY_NODE_SPLIT_VGROUP_STMT,
QUERY_NODE_SYNCDB_STMT,
QUERY_NODE_GRANT_STMT,
QUERY_NODE_REVOKE_STMT,
QUERY_NODE_SHOW_DNODES_STMT,
QUERY_NODE_SHOW_MNODES_STMT,
QUERY_NODE_SHOW_MODULES_STMT,
QUERY_NODE_SHOW_QNODES_STMT,
QUERY_NODE_SHOW_SNODES_STMT,
QUERY_NODE_SHOW_BNODES_STMT,
QUERY_NODE_SHOW_CLUSTER_STMT,
QUERY_NODE_SHOW_DATABASES_STMT,
QUERY_NODE_SHOW_FUNCTIONS_STMT,
QUERY_NODE_SHOW_INDEXES_STMT,
QUERY_NODE_SHOW_STABLES_STMT,
QUERY_NODE_SHOW_STREAMS_STMT,
QUERY_NODE_SHOW_TABLES_STMT,
QUERY_NODE_SHOW_TAGS_STMT,
QUERY_NODE_SHOW_USERS_STMT,
QUERY_NODE_SHOW_LICENCES_STMT,
QUERY_NODE_SHOW_VGROUPS_STMT,
QUERY_NODE_SHOW_TOPICS_STMT,
QUERY_NODE_SHOW_CONSUMERS_STMT,
QUERY_NODE_SHOW_CONNECTIONS_STMT,
QUERY_NODE_SHOW_QUERIES_STMT,
QUERY_NODE_SHOW_APPS_STMT,
QUERY_NODE_SHOW_VARIABLES_STMT,
QUERY_NODE_SHOW_DNODE_VARIABLES_STMT,
QUERY_NODE_SHOW_TRANSACTIONS_STMT,
QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
QUERY_NODE_SHOW_VNODES_STMT,
QUERY_NODE_SHOW_USER_PRIVILEGES_STMT,
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
QUERY_NODE_SHOW_CREATE_STABLE_STMT,
QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
QUERY_NODE_SHOW_SCORES_STMT,
QUERY_NODE_SHOW_TABLE_TAGS_STMT,
QUERY_NODE_KILL_CONNECTION_STMT,
QUERY_NODE_KILL_QUERY_STMT,
QUERY_NODE_KILL_TRANSACTION_STMT,
QUERY_NODE_DELETE_STMT,
QUERY_NODE_INSERT_STMT,
QUERY_NODE_QUERY,
QUERY_NODE_SHOW_DB_ALIVE_STMT,
QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT,
QUERY_NODE_BALANCE_VGROUP_LEADER_STMT,
QUERY_NODE_RESTORE_DNODE_STMT,
QUERY_NODE_RESTORE_QNODE_STMT,
QUERY_NODE_RESTORE_MNODE_STMT,
QUERY_NODE_RESTORE_VNODE_STMT,
QUERY_NODE_PAUSE_STREAM_STMT,
QUERY_NODE_RESUME_STREAM_STMT,
// logic plan node
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
QUERY_NODE_LOGIC_PLAN_JOIN,
QUERY_NODE_LOGIC_PLAN_AGG,
QUERY_NODE_LOGIC_PLAN_PROJECT,
QUERY_NODE_LOGIC_PLAN_VNODE_MODIFY,
QUERY_NODE_LOGIC_PLAN_EXCHANGE,
QUERY_NODE_LOGIC_PLAN_MERGE,
QUERY_NODE_LOGIC_PLAN_WINDOW,
QUERY_NODE_LOGIC_PLAN_FILL,
QUERY_NODE_LOGIC_PLAN_SORT,
QUERY_NODE_LOGIC_PLAN_PARTITION,
QUERY_NODE_LOGIC_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_LOGIC_PLAN_INTERP_FUNC,
QUERY_NODE_LOGIC_SUBPLAN,
QUERY_NODE_LOGIC_PLAN,
// physical plan node
QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN = 1100,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN,
QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN,
QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN,
QUERY_NODE_PHYSICAL_PLAN_PROJECT,
QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN,
QUERY_NODE_PHYSICAL_PLAN_HASH_AGG,
QUERY_NODE_PHYSICAL_PLAN_EXCHANGE,
QUERY_NODE_PHYSICAL_PLAN_MERGE,
QUERY_NODE_PHYSICAL_PLAN_SORT,
QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT,
QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_FILL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION,
QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE,
QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE,
QUERY_NODE_PHYSICAL_PLAN_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC,
QUERY_NODE_PHYSICAL_PLAN_DISPATCH,
QUERY_NODE_PHYSICAL_PLAN_INSERT,
QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT,
QUERY_NODE_PHYSICAL_PLAN_DELETE,
QUERY_NODE_PHYSICAL_SUBPLAN,
QUERY_NODE_PHYSICAL_PLAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN,
QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT
} ENodeType;
/**
* The first field of a node of any type is guaranteed to be the ENodeType.
* Hence the type of any node can be gotten by casting it to SNode.

View File

@ -42,10 +42,13 @@ typedef enum EGroupAction {
typedef struct SLogicNode {
ENodeType type;
bool dynamicOp;
bool stmtRoot;
SNodeList* pTargets; // SColumnNode
SNode* pConditions;
SNodeList* pChildren;
struct SLogicNode* pParent;
SNodeList* pHint;
int32_t optimizedFlag;
uint8_t precision;
SNode* pLimit;
@ -107,15 +110,21 @@ typedef struct SScanLogicNode {
bool sortPrimaryKey;
bool igLastNull;
bool groupOrderScan;
bool onlyMetaCtbIdx; // for tag scan with no tbname
} SScanLogicNode;
typedef struct SJoinLogicNode {
SLogicNode node;
EJoinType joinType;
SNode* pMergeCondition;
SNode* pOnConditions;
bool isSingleTableJoin;
SNode* pColEqualOnConditions;
SLogicNode node;
EJoinType joinType;
EJoinAlgorithm joinAlgo;
SNode* pPrimKeyEqCond;
SNode* pColEqCond;
SNode* pTagEqCond;
SNode* pTagOnCond;
SNode* pOtherOnCond;
bool isSingleTableJoin;
bool hasSubQuery;
bool isLowLevelJoin;
} SJoinLogicNode;
typedef struct SAggLogicNode {
@ -154,6 +163,28 @@ typedef struct SInterpFuncLogicNode {
SNode* pTimeSeries; // SColumnNode
} SInterpFuncLogicNode;
typedef struct SGroupCacheLogicNode {
SLogicNode node;
bool grpColsMayBeNull;
bool grpByUid;
bool globalGrp;
bool batchFetch;
SNodeList* pGroupCols;
} SGroupCacheLogicNode;
typedef struct SDynQueryCtrlStbJoin {
bool batchFetch;
SNodeList* pVgList;
SNodeList* pUidList;
bool srcScan[2];
} SDynQueryCtrlStbJoin;
typedef struct SDynQueryCtrlLogicNode {
SLogicNode node;
EDynQueryType qType;
SDynQueryCtrlStbJoin stbJoin;
} SDynQueryCtrlLogicNode;
typedef enum EModifyTableType { MODIFY_TABLE_TYPE_INSERT = 1, MODIFY_TABLE_TYPE_DELETE } EModifyTableType;
typedef struct SVnodeModifyLogicNode {
@ -312,6 +343,7 @@ typedef struct SDataBlockDescNode {
typedef struct SPhysiNode {
ENodeType type;
bool dynamicOp;
EOrder inputTsOrder;
EOrder outputTsOrder;
SDataBlockDescNode* pOutputDataBlockDesc;
@ -334,7 +366,11 @@ typedef struct SScanPhysiNode {
bool groupOrderScan;
} SScanPhysiNode;
typedef SScanPhysiNode STagScanPhysiNode;
typedef struct STagScanPhysiNode {
SScanPhysiNode scan;
bool onlyMetaCtbIdx; //no tbname, tag index not used.
} STagScanPhysiNode;
typedef SScanPhysiNode SBlockDistScanPhysiNode;
typedef struct SLastRowScanPhysiNode {
@ -409,12 +445,50 @@ typedef struct SInterpFuncPhysiNode {
typedef struct SSortMergeJoinPhysiNode {
SPhysiNode node;
EJoinType joinType;
SNode* pMergeCondition;
SNode* pOnConditions;
SNode* pPrimKeyCond;
SNode* pColEqCond;
SNode* pOtherOnCond;
SNodeList* pTargets;
SNode* pColEqualOnConditions;
} SSortMergeJoinPhysiNode;
typedef struct SHashJoinPhysiNode {
SPhysiNode node;
EJoinType joinType;
SNodeList* pOnLeft;
SNodeList* pOnRight;
SNode* pFilterConditions;
SNodeList* pTargets;
SQueryStat inputStat[2];
SNode* pPrimKeyCond;
SNode* pColEqCond;
SNode* pTagEqCond;
} SHashJoinPhysiNode;
typedef struct SGroupCachePhysiNode {
SPhysiNode node;
bool grpColsMayBeNull;
bool grpByUid;
bool globalGrp;
bool batchFetch;
SNodeList* pGroupCols;
} SGroupCachePhysiNode;
typedef struct SStbJoinDynCtrlBasic {
bool batchFetch;
int32_t vgSlot[2];
int32_t uidSlot[2];
bool srcScan[2];
} SStbJoinDynCtrlBasic;
typedef struct SDynQueryCtrlPhysiNode {
SPhysiNode node;
EDynQueryType qType;
union {
SStbJoinDynCtrlBasic stbJoin;
};
} SDynQueryCtrlPhysiNode;
typedef struct SAggPhysiNode {
SPhysiNode node;
SNodeList* pExprs; // these are expression list of group_by_clause and parameter expression of aggregate function
@ -603,6 +677,8 @@ typedef struct SSubplan {
SNode* pTagCond;
SNode* pTagIndexCond;
bool showRewrite;
int32_t rowsThreshold;
bool dynamicRowThreshold;
} SSubplan;
typedef enum EExplainMode { EXPLAIN_MODE_DISABLE = 1, EXPLAIN_MODE_STATIC, EXPLAIN_MODE_ANALYZE } EExplainMode;

View File

@ -116,6 +116,17 @@ typedef struct SLeftValueNode {
ENodeType type;
} SLeftValueNode;
typedef enum EHintOption {
HINT_NO_BATCH_SCAN = 1,
HINT_BATCH_SCAN,
} EHintOption;
typedef struct SHintNode {
ENodeType type;
EHintOption option;
void* value;
} SHintNode;
typedef struct SOperatorNode {
SExprNode node; // QUERY_NODE_OPERATOR
EOperatorType opType;
@ -169,11 +180,27 @@ typedef struct STempTableNode {
SNode* pSubquery;
} STempTableNode;
typedef enum EJoinType { JOIN_TYPE_INNER = 1 } EJoinType;
typedef enum EJoinType {
JOIN_TYPE_INNER = 1,
JOIN_TYPE_LEFT,
JOIN_TYPE_RIGHT,
} EJoinType;
typedef enum EJoinAlgorithm {
JOIN_ALGO_UNKNOWN = 0,
JOIN_ALGO_MERGE,
JOIN_ALGO_HASH,
} EJoinAlgorithm;
typedef enum EDynQueryType {
DYN_QTYPE_STB_HASH = 1,
} EDynQueryType;
typedef struct SJoinTableNode {
STableNode table; // QUERY_NODE_JOIN_TABLE
EJoinType joinType;
bool hasSubQuery;
bool isLowLevelJoin;
SNode* pLeft;
SNode* pRight;
SNode* pOnCond;
@ -289,6 +316,7 @@ typedef struct SSelectStmt {
SLimitNode* pLimit;
SLimitNode* pSlimit;
STimeWindow timeRange;
SNodeList* pHint;
char stmtName[TSDB_TABLE_NAME_LEN];
uint8_t precision;
int32_t selectFuncNum;
@ -470,7 +498,7 @@ int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char*
int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, ECollectColType type, SNodeList** pCols);
typedef bool (*FFuncClassifier)(int32_t funcId);
int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifier classifier, SNodeList** pFuncs);
int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, SNodeList** pFuncs);
int32_t nodesCollectSpecialNodes(SSelectStmt* pSelect, ESqlClause clause, ENodeType type, SNodeList** pNodes);

View File

@ -114,6 +114,7 @@ int32_t smlBuildRow(STableDataCxt* pTableCxt);
int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* kv, int32_t index);
STableDataCxt* smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta);
void clearColValArraySml(SArray* pCols);
int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols,
STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
char* msgBuf, int32_t msgBufLen);

View File

@ -90,11 +90,6 @@ typedef struct SExecResult {
void* res;
} SExecResult;
typedef struct STbVerInfo {
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t sversion;
int32_t tversion;
} STbVerInfo;
#pragma pack(push, 1)
typedef struct SCTableMeta {
@ -212,6 +207,11 @@ typedef struct SQueryNodeStat {
int32_t tableNum; // vg table number, unit is TSDB_TABLE_NUM_UNIT
} SQueryNodeStat;
typedef struct SQueryStat {
int64_t inputRowNum;
int32_t inputRowSize;
} SQueryStat;
int32_t initTaskQueue();
int32_t cleanupTaskQueue();

View File

@ -45,6 +45,7 @@ typedef struct {
uint64_t cqueryProcessed;
uint64_t fetchProcessed;
uint64_t dropProcessed;
uint64_t notifyProcessed;
uint64_t hbProcessed;
uint64_t deleteProcessed;
@ -90,6 +91,8 @@ int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, in
int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessNotifyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SDeleteRes *pRes);

View File

@ -95,6 +95,8 @@ int32_t qStartTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p
int32_t qEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qTbnameFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qTbUidFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qVgIdFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
/* Aggregation functions */
int32_t countScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);

View File

@ -0,0 +1,36 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _STREAM_BACKEDN_SNAPSHOT_H_
#define _STREAM_BACKEDN_SNAPSHOT_H_
#include "tcommon.h"
#define STREAM_STATE_TRANSFER "stream-state-transfer"
typedef struct SStreamSnapReader SStreamSnapReader;
typedef struct SStreamSnapWriter SStreamSnapWriter;
typedef struct SStreamSnapHandle SStreamSnapHandle;
typedef struct SStreamSnapBlockHdr SStreamSnapBlockHdr;
int32_t streamSnapReaderOpen(void* pMeta, int64_t sver, int64_t ever, char* path, SStreamSnapReader** ppReader);
int32_t streamSnapReaderClose(SStreamSnapReader* pReader);
int32_t streamSnapRead(SStreamSnapReader* pReader, uint8_t** ppData, int64_t* size);
// SMetaSnapWriter ========================================
int32_t streamSnapWriterOpen(void* pMeta, int64_t sver, int64_t ever, char* path, SStreamSnapWriter** ppWriter);
int32_t streamSnapWrite(SStreamSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
int32_t streamSnapWriterClose(SStreamSnapWriter* ppWriter, int8_t rollback);
#endif
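A hedged sketch of moving stream-state data with the reader/writer pair declared above; the zero-size read as the end-of-stream condition and the 0-on-success convention are assumptions:
SStreamSnapReader *pReader = NULL;
SStreamSnapWriter *pWriter = NULL;
if (streamSnapReaderOpen(pSrcMeta, sver, ever, srcPath, &pReader) == 0 &&
    streamSnapWriterOpen(pDstMeta, sver, ever, dstPath, &pWriter) == 0) {
  uint8_t *pData = NULL;
  int64_t  size = 0;
  while (streamSnapRead(pReader, &pData, &size) == 0 && size > 0) {
    streamSnapWrite(pWriter, pData, (uint32_t)size);
  }
  streamSnapWriterClose(pWriter, 0 /*rollback*/);
  streamSnapReaderClose(pReader);
}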

View File

@ -20,6 +20,7 @@
#include "tmsg.h"
#include "tmsgcb.h"
#include "tqueue.h"
#include "ttimer.h"
#ifdef __cplusplus
extern "C" {
@ -30,7 +31,7 @@ extern "C" {
typedef struct SStreamTask SStreamTask;
#define SSTREAM_TASK_VER 1
#define SSTREAM_TASK_VER 2
enum {
STREAM_STATUS__NORMAL = 0,
STREAM_STATUS__STOP,
@ -48,6 +49,7 @@ enum {
TASK_STATUS__SCAN_HISTORY, // stream task scan history data by using tsdbread in the stream scanner
TASK_STATUS__HALT, // pause, but not be manipulated by user command
TASK_STATUS__PAUSE, // pause
TASK_STATUS__CK, // stream task is in checkpoint status, no data are allowed to be put into the inputQ anymore
};
enum {
@ -61,15 +63,12 @@ enum {
enum {
TASK_INPUT_STATUS__NORMAL = 1,
TASK_INPUT_STATUS__BLOCKED,
TASK_INPUT_STATUS__RECOVER,
TASK_INPUT_STATUS__STOP,
TASK_INPUT_STATUS__FAILED,
};
enum {
TASK_OUTPUT_STATUS__NORMAL = 1,
TASK_OUTPUT_STATUS__WAIT,
TASK_OUTPUT_STATUS__BLOCKED,
};
enum {
@ -97,11 +96,16 @@ enum {
STREAM_QUEUE__PROCESSING,
};
enum {
STREAM_META_WILL_STOP = 1,
STREAM_META_OK_TO_STOP = 2,
};
typedef struct {
int8_t type;
} SStreamQueueItem;
typedef void FTbSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
typedef void FTbSink(SStreamTask* pTask, void* vnode, void* data);
typedef int32_t FTaskExpand(void* ahandle, SStreamTask* pTask, int64_t ver);
typedef struct {
@ -119,14 +123,13 @@ typedef struct {
} SStreamMergedSubmit;
typedef struct {
int8_t type;
int8_t type;
int64_t nodeId; // nodeId, from SStreamMeta
int32_t srcVgId;
int32_t srcTaskId;
int32_t childId;
int64_t sourceVer;
int64_t reqId;
SArray* blocks; // SArray<SSDataBlock>
} SStreamDataBlock;
@ -136,10 +139,6 @@ typedef struct {
SSDataBlock* pBlock;
} SStreamRefDataBlock;
typedef struct {
int8_t type;
} SStreamCheckpoint;
typedef struct {
int8_t type;
SSDataBlock* pBlock;
@ -179,7 +178,7 @@ SStreamQueueRes streamQueueGetRes(SStreamQueue1* pQueue);
#endif
typedef struct {
STaosQueue* queue;
STaosQueue* pQueue;
STaosQall* qall;
void* qItem;
int8_t status;
@ -190,19 +189,9 @@ void streamCleanUp();
SStreamQueue* streamQueueOpen(int64_t cap);
void streamQueueClose(SStreamQueue* pQueue, int32_t taskId);
static FORCE_INLINE void streamQueueProcessSuccess(SStreamQueue* queue) {
ASSERT(atomic_load_8(&queue->status) == STREAM_QUEUE__PROCESSING);
queue->qItem = NULL;
atomic_store_8(&queue->status, STREAM_QUEUE__SUCESS);
}
static FORCE_INLINE void streamQueueProcessFail(SStreamQueue* queue) {
ASSERT(atomic_load_8(&queue->status) == STREAM_QUEUE__PROCESSING);
atomic_store_8(&queue->status, STREAM_QUEUE__FAILED);
}
void streamQueueProcessSuccess(SStreamQueue* queue);
void streamQueueProcessFail(SStreamQueue* queue);
void* streamQueueNextItem(SStreamQueue* pQueue);
SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type);
void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit);
@ -252,20 +241,26 @@ typedef struct SStreamChildEpInfo {
int32_t nodeId;
int32_t childId;
int32_t taskId;
int8_t dataAllowed;
SEpSet epSet;
bool dataAllowed; // denotes whether data from this upstream task may be put into the inputQ; not serialized
int64_t stage; // upstream task stage value, denoting whether the upstream node has restarted, changed replica, or transferred
} SStreamChildEpInfo;
typedef struct SStreamId {
typedef struct SStreamTaskKey {
int64_t streamId;
int32_t taskId;
} SStreamTaskKey;
typedef struct SStreamTaskId {
int64_t streamId;
int32_t taskId;
const char* idStr;
} SStreamId;
} SStreamTaskId;
typedef struct SCheckpointInfo {
int64_t id;
int64_t version; // offset in WAL
int64_t currentVer; // current offset in WAL, not serialized
int64_t checkpointId;
int64_t checkpointVer; // latest checkpointId version
int64_t currentVer; // current offset in WAL, not serialized
} SCheckpointInfo;
typedef struct SStreamStatus {
@ -273,24 +268,25 @@ typedef struct SStreamStatus {
int8_t downstreamReady; // downstream tasks are all ready now, if this flag is set
int8_t schedStatus;
int8_t keepTaskStatus;
bool transferState;
bool appendTranstateBlock; // has already appended the transfer-state data block; todo: remove it
int8_t timerActive; // timer is active
int8_t pauseAllowed; // allowed task status to be set to be paused
int8_t timerActive; // timer is active
int8_t pauseAllowed; // allowed task status to be set to be paused
} SStreamStatus;
typedef struct SHistDataRange {
typedef struct SDataRange {
SVersionRange range;
STimeWindow window;
} SHistDataRange;
} SDataRange;
typedef struct SSTaskBasicInfo {
int32_t nodeId; // vgroup id or snode id
SEpSet epSet;
SEpSet mnodeEpset; // mnode epset for send heartbeat
int32_t selfChildId;
int32_t totalLevel;
int8_t taskLevel;
int8_t fillHistory; // is fill history task or not
int64_t triggerParam; // in msec
} SSTaskBasicInfo;
typedef struct SDispatchMsgInfo {
@ -300,12 +296,22 @@ typedef struct SDispatchMsgInfo {
int64_t blockingTs; // output blocking timestamp
} SDispatchMsgInfo;
typedef struct {
typedef struct STaskOutputInfo {
int8_t type;
int8_t status;
SStreamQueue* queue;
} STaskOutputInfo;
typedef struct STaskInputInfo {
int8_t status;
SStreamQueue* queue;
} STaskInputInfo;
typedef struct STaskSchedInfo {
int8_t status;
void* pTimer;
} STaskSchedInfo;
typedef struct {
int64_t init;
int64_t step1Start;
@ -314,20 +320,23 @@ typedef struct {
struct SStreamTask {
int64_t ver;
SStreamId id;
SStreamTaskId id;
SSTaskBasicInfo info;
STaskOutputInfo outputInfo;
STaskInputInfo inputInfo;
STaskSchedInfo schedInfo;
SDispatchMsgInfo msgInfo;
SStreamStatus status;
SCheckpointInfo chkInfo;
STaskExec exec;
SHistDataRange dataRange;
SStreamId historyTaskId;
SStreamId streamTaskId;
SArray* pUpstreamEpInfoList; // SArray<SStreamChildEpInfo*>, // children info
int32_t nextCheckId;
SArray* checkpointInfo; // SArray<SStreamCheckpointInfo>
SDataRange dataRange;
SStreamTaskId historyTaskId;
SStreamTaskId streamTaskId;
STaskTimestamp tsInfo;
SArray* pReadyMsgList; // SArray<SStreamChkptReadyInfo*>
TdThreadMutex lock; // secures setting the task status and putting data into the inputQ
SArray* pUpstreamInfoList;
// output
union {
STaskDispatcherFixedEp fixedEpDispatcher;
@ -337,18 +346,10 @@ struct SStreamTask {
STaskSinkFetch fetchSink;
};
int8_t inputStatus;
SStreamQueue* inputQueue;
// trigger
int8_t triggerStatus;
int64_t triggerParam;
void* schedTimer;
void* launchTaskTimer;
SMsgCb* pMsgCb; // msg handle
SStreamState* pState; // state backend
SArray* pRspMsgList;
TdThreadMutex lock;
// the followings attributes don't be serialized
int32_t notReadyTasks;
@ -358,11 +359,19 @@ struct SStreamTask {
int32_t refCnt;
int64_t checkpointingId;
int32_t checkpointAlignCnt;
int32_t checkpointNotReadyTasks;
int32_t transferStateAlignCnt;
struct SStreamMeta* pMeta;
SSHashObj* pNameMap;
char reserve[256];
};
typedef struct SMetaHbInfo {
tmr_h hbTmr;
int32_t stopFlag;
int32_t tickCounter;
} SMetaHbInfo;
// meta
typedef struct SStreamMeta {
char* path;
@ -375,12 +384,25 @@ typedef struct SStreamMeta {
TXN* txn;
FTaskExpand* expandFunc;
int32_t vgId;
int64_t stage;
SRWLatch lock;
int32_t walScanCounter;
void* streamBackend;
int64_t streamBackendRid;
SHashObj* pTaskBackendUnique;
TdThreadMutex backendMutex;
SMetaHbInfo hbInfo;
int32_t closedTask;
int32_t totalTasks; // this value should be increased when a new task is added into the meta
int32_t chkptNotReadyTasks;
int64_t rid;
int64_t chkpId;
SArray* chkpSaved;
SArray* chkpInUse;
int32_t chkpCap;
SRWLatch chkpDirLock;
int32_t pauseTaskNum;
} SStreamMeta;
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
@ -391,8 +413,14 @@ SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, int8_t fillHisto
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
void tFreeStreamTask(SStreamTask* pTask);
int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem);
bool tInputQueueIsFull(const SStreamTask* pTask);
int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver);
int32_t tDecodeStreamTaskChkInfo(SDecoder* pDecoder, SCheckpointInfo* pChkpInfo);
int32_t tDecodeStreamTaskId(SDecoder* pDecoder, SStreamTaskId* pTaskId);
int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem);
int32_t streamTaskPutTranstateIntoInputQ(SStreamTask* pTask);
bool streamQueueIsFull(const STaosQueue* pQueue);
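A sketch of feeding a task through the renamed input-queue API, assuming a negative return means the item was rejected; the back-off policy is left to the caller:
if (streamQueueIsFull(pTask->inputInfo.queue->pQueue)) {
  // queue is full: wait, retry, or drop according to the caller's policy
} else if (streamTaskPutDataIntoInputQ(pTask, (SStreamQueueItem*)pBlock) < 0) {
  // the task refused the item, e.g. it is stopping or in checkpoint
}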
typedef struct {
SMsgHead head;
@ -401,8 +429,9 @@ typedef struct {
} SStreamTaskRunReq;
typedef struct {
int64_t streamId;
int32_t type;
int64_t stage; // nodeId from upstream task
int64_t streamId;
int32_t taskId;
int32_t srcVgId;
int32_t upstreamTaskId;
@ -443,6 +472,7 @@ typedef struct {
typedef struct {
int64_t reqId;
int64_t stage;
int64_t streamId;
int32_t upstreamNodeId;
int32_t upstreamTaskId;
@ -459,6 +489,7 @@ typedef struct {
int32_t downstreamNodeId;
int32_t downstreamTaskId;
int32_t childId;
int32_t oldStage;
int8_t status;
} SStreamTaskCheckRsp;
@ -485,6 +516,8 @@ typedef struct {
int64_t checkpointId;
int32_t taskId;
int32_t nodeId;
SEpSet mgmtEps;
int32_t mnodeId;
int64_t expireTime;
} SStreamCheckpointSourceReq;
@ -493,14 +526,16 @@ typedef struct {
int64_t checkpointId;
int32_t taskId;
int32_t nodeId;
int32_t mnodeId;
int64_t expireTime;
int8_t success;
} SStreamCheckpointSourceRsp;
int32_t tEncodeSStreamCheckpointSourceReq(SEncoder* pEncoder, const SStreamCheckpointSourceReq* pReq);
int32_t tDecodeSStreamCheckpointSourceReq(SDecoder* pDecoder, SStreamCheckpointSourceReq* pReq);
int32_t tEncodeStreamCheckpointSourceReq(SEncoder* pEncoder, const SStreamCheckpointSourceReq* pReq);
int32_t tDecodeStreamCheckpointSourceReq(SDecoder* pDecoder, SStreamCheckpointSourceReq* pReq);
int32_t tEncodeSStreamCheckpointSourceRsp(SEncoder* pEncoder, const SStreamCheckpointSourceRsp* pRsp);
int32_t tDecodeSStreamCheckpointSourceRsp(SDecoder* pDecoder, SStreamCheckpointSourceRsp* pRsp);
int32_t tEncodeStreamCheckpointSourceRsp(SEncoder* pEncoder, const SStreamCheckpointSourceRsp* pRsp);
int32_t tDecodeStreamCheckpointSourceRsp(SDecoder* pDecoder, SStreamCheckpointSourceRsp* pRsp);
typedef struct {
SMsgHead msgHead;
@ -511,28 +546,25 @@ typedef struct {
int32_t upstreamTaskId;
int32_t upstreamNodeId;
int32_t childId;
int64_t expireTime;
int8_t taskLevel;
} SStreamCheckpointReq;
} SStreamCheckpointReadyMsg;
typedef struct {
SMsgHead msgHead;
int64_t streamId;
int64_t checkpointId;
int32_t downstreamTaskId;
int32_t downstreamNodeId;
int32_t upstreamTaskId;
int32_t upstreamNodeId;
int32_t childId;
int64_t expireTime;
int8_t taskLevel;
} SStreamCheckpointRsp;
int32_t tEncodeStreamCheckpointReadyMsg(SEncoder* pEncoder, const SStreamCheckpointReadyMsg* pRsp);
int32_t tDecodeStreamCheckpointReadyMsg(SDecoder* pDecoder, SStreamCheckpointReadyMsg* pRsp);
int32_t tEncodeSStreamCheckpointReq(SEncoder* pEncoder, const SStreamCheckpointReq* pReq);
int32_t tDecodeSStreamCheckpointReq(SDecoder* pDecoder, SStreamCheckpointReq* pReq);
typedef struct STaskStatusEntry {
int64_t streamId;
int32_t taskId;
int32_t status;
} STaskStatusEntry;
int32_t tEncodeSStreamCheckpointRsp(SEncoder* pEncoder, const SStreamCheckpointRsp* pRsp);
int32_t tDecodeSStreamCheckpointRsp(SDecoder* pDecoder, SStreamCheckpointRsp* pRsp);
typedef struct SStreamHbMsg {
int32_t vgId;
int32_t numOfTasks;
SArray* pTaskStatus; // SArray<SStreamTaskStatusEntry>
} SStreamHbMsg;
int32_t tEncodeStreamHbMsg(SEncoder* pEncoder, const SStreamHbMsg* pRsp);
int32_t tDecodeStreamHbMsg(SDecoder* pDecoder, SStreamHbMsg* pRsp);
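Building the heartbeat payload is straightforward with the generic SArray helpers; a hedged sketch (the encoder setup around tEncodeStreamHbMsg is omitted, and the field values are illustrative):
SStreamHbMsg hb = {0};
hb.vgId = vgId;
hb.pTaskStatus = taosArrayInit(4, sizeof(STaskStatusEntry));
STaskStatusEntry entry = {.streamId = streamId, .taskId = taskId, .status = status};
taosArrayPush(hb.pTaskStatus, &entry);
hb.numOfTasks = (int32_t)taosArrayGetSize(hb.pTaskStatus);
// serialize with tEncodeStreamHbMsg() into an initialized SEncoder and send to the mnode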
typedef struct {
int64_t streamId;
@ -545,6 +577,21 @@ typedef struct {
int32_t tEncodeCompleteHistoryDataMsg(SEncoder* pEncoder, const SStreamCompleteHistoryMsg* pReq);
int32_t tDecodeCompleteHistoryDataMsg(SDecoder* pDecoder, SStreamCompleteHistoryMsg* pReq);
typedef struct SNodeUpdateInfo {
int32_t nodeId;
SEpSet prevEp;
SEpSet newEp;
} SNodeUpdateInfo;
typedef struct SStreamTaskNodeUpdateMsg {
int64_t streamId;
int32_t taskId;
SArray* pNodeList; // SArray<SNodeUpdateInfo>
} SStreamTaskNodeUpdateMsg;
int32_t tEncodeStreamTaskUpdateMsg(SEncoder* pEncoder, const SStreamTaskNodeUpdateMsg* pMsg);
int32_t tDecodeStreamTaskUpdateMsg(SDecoder* pDecoder, SStreamTaskNodeUpdateMsg* pMsg);
typedef struct {
int64_t streamId;
int32_t downstreamTaskId;
@ -564,16 +611,11 @@ int32_t tDecodeStreamTaskCheckReq(SDecoder* pDecoder, SStreamTaskCheckReq* pReq)
int32_t tEncodeStreamTaskCheckRsp(SEncoder* pEncoder, const SStreamTaskCheckRsp* pRsp);
int32_t tDecodeStreamTaskCheckRsp(SDecoder* pDecoder, SStreamTaskCheckRsp* pRsp);
int32_t tEncodeSStreamTaskScanHistoryReq(SEncoder* pEncoder, const SStreamRecoverDownstreamReq* pReq);
int32_t tDecodeSStreamTaskScanHistoryReq(SDecoder* pDecoder, SStreamRecoverDownstreamReq* pReq);
int32_t tEncodeSStreamTaskRecoverRsp(SEncoder* pEncoder, const SStreamRecoverDownstreamRsp* pRsp);
int32_t tDecodeSStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamRecoverDownstreamRsp* pRsp);
int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq);
int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
void tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq);
void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
@ -581,10 +623,9 @@ int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
int32_t streamProcessRunReq(SStreamTask* pTask);
int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec);
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code);
void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg);
SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId);
void streamTaskInputFail(SStreamTask* pTask);
int32_t streamTryExec(SStreamTask* pTask);
@ -594,16 +635,19 @@ bool streamTaskShouldStop(const SStreamStatus* pStatus);
bool streamTaskShouldPause(const SStreamStatus* pStatus);
bool streamTaskIsIdle(const SStreamTask* pTask);
SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId);
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize);
void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen);
char* createStreamTaskIdStr(int64_t streamId, int32_t taskId);
// recover and fill history
void streamTaskCheckDownstreamTasks(SStreamTask* pTask);
int32_t streamTaskDoCheckDownstreamTasks(SStreamTask* pTask);
void streamTaskCheckDownstream(SStreamTask* pTask);
int32_t streamTaskLaunchScanHistory(SStreamTask* pTask);
int32_t streamTaskCheckStatus(SStreamTask* pTask);
int32_t streamTaskCheckStatus(SStreamTask* pTask, int32_t upstreamTaskId, int32_t vgId, int64_t stage);
int32_t streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList);
void streamTaskResetUpstreamStageInfo(SStreamTask* pTask);
int32_t streamTaskStop(SStreamTask* pTask);
int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq* pReq, SStreamTaskCheckRsp* pRsp,
SRpcHandleInfo* pRpcInfo, int32_t taskId);
int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp);
@ -611,17 +655,26 @@ int32_t streamLaunchFillHistoryTask(SStreamTask* pTask);
int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask);
int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated);
bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer);
int32_t streamTaskGetInputQItems(const SStreamTask* pTask);
// common
int32_t streamRestoreParam(SStreamTask* pTask);
int32_t streamSetStatusNormal(SStreamTask* pTask);
const char* streamGetTaskStatusStr(int32_t status);
void streamTaskPause(SStreamTask* pTask);
void streamTaskResume(SStreamTask* pTask);
void streamTaskPause(SStreamTask* pTask, SStreamMeta* pMeta);
void streamTaskResume(SStreamTask* pTask, SStreamMeta* pMeta);
void streamTaskHalt(SStreamTask* pTask);
void streamTaskResumeFromHalt(SStreamTask* pTask);
void streamTaskDisablePause(SStreamTask* pTask);
void streamTaskEnablePause(SStreamTask* pTask);
int32_t streamTaskSetUpstreamInfo(SStreamTask* pTask, const SStreamTask* pUpstreamTask);
void streamTaskUpdateUpstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet);
void streamTaskUpdateDownstreamInfo(SStreamTask* pTask, int32_t nodeId, const SEpSet* pEpSet);
void streamTaskSetFixedDownstreamInfo(SStreamTask* pTask, const SStreamTask* pDownstreamTask);
int32_t streamTaskReleaseState(SStreamTask* pTask);
int32_t streamTaskReloadState(SStreamTask* pTask);
void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
// source level
int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow);
@ -629,8 +682,6 @@ int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* p
int32_t streamSourceScanHistoryData(SStreamTask* pTask);
int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);
int32_t appendTranstateIntoInputQ(SStreamTask* pTask);
// agg level
int32_t streamTaskScanHistoryPrepare(SStreamTask* pTask);
int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq,
@ -640,31 +691,32 @@ int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask);
// stream task meta
void streamMetaInit();
void streamMetaCleanup();
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId);
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc, int32_t vgId, int64_t stage);
void streamMetaClose(SStreamMeta* streamMeta);
// save to b-tree meta store
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId);
int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask); // save to stream meta store
int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int64_t* pKey);
int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded);
int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); // todo remove it
int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta);
int32_t streamMetaGetNumOfStreamTasks(SStreamMeta* pMeta);
SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId);
void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask);
int32_t streamMetaBegin(SStreamMeta* pMeta);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver);
int32_t streamMetaReopen(SStreamMeta* pMeta, int64_t chkpId);
int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaLoadAllTasks(SStreamMeta* pMeta);
void streamMetaNotifyClose(SStreamMeta* pMeta);
// checkpoint
int32_t streamProcessCheckpointSourceReq(SStreamMeta* pMeta, SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);
int32_t streamProcessCheckpointReq(SStreamMeta* pMeta, SStreamTask* pTask, SStreamCheckpointReq* pReq);
int32_t streamProcessCheckpointRsp(SStreamMeta* pMeta, SStreamTask* pTask, SStreamCheckpointRsp* pRsp);
int32_t streamProcessCheckpointSourceReq(SStreamTask* pTask, SStreamCheckpointSourceReq* pReq);
int32_t streamProcessCheckpointReadyMsg(SStreamTask* pTask);
int32_t streamTaskReleaseState(SStreamTask* pTask);
int32_t streamTaskReloadState(SStreamTask* pTask);
int32_t streamAlignTransferState(SStreamTask* pTask);
int32_t streamAddCheckpointSourceRspMsg(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SStreamTask* pTask,
int8_t isSucceed);
int32_t buildCheckpointSourceRsp(SStreamCheckpointSourceReq* pReq, SRpcHandleInfo* pRpcInfo, SRpcMsg* pMsg,
int8_t isSucceed);
#ifdef __cplusplus
}
#endif

View File

@ -31,7 +31,8 @@ typedef struct SStreamFileState SStreamFileState;
typedef SList SStreamSnapshot;
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
GetTsFun fp, void* pFile, TSKEY delMark, const char* id);
GetTsFun fp, void* pFile, TSKEY delMark, const char* taskId,
int64_t checkpointId);
void streamFileStateDestroy(SStreamFileState* pFileState);
void streamFileStateClear(SStreamFileState* pFileState);
bool needClearDiskBuff(SStreamFileState* pFileState);
@ -44,7 +45,7 @@ bool hasRowBuff(SStreamFileState* pFileState, void* pKey, int32_t keyLen);
SStreamSnapshot* getSnapshot(SStreamFileState* pFileState);
int32_t flushSnapshot(SStreamFileState* pFileState, SStreamSnapshot* pSnapshot, bool flushState);
int32_t recoverSnapshot(SStreamFileState* pFileState);
int32_t recoverSnapshot(SStreamFileState* pFileState, int64_t ckId);
int32_t getSnapshotIdList(SStreamFileState* pFileState, SArray* list);
int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark);

View File

@ -43,8 +43,8 @@ typedef struct SUpdateKey {
// uint64_t maxDataVersion;
//} SUpdateInfo;
SUpdateInfo *updateInfoInitP(SInterval *pInterval, int64_t watermark);
SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark);
SUpdateInfo *updateInfoInitP(SInterval *pInterval, int64_t watermark, bool igUp);
SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark, bool igUp);
TSKEY updateInfoFillBlockData(SUpdateInfo *pInfo, SSDataBlock *pBlock, int32_t primaryTsCol);
bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts);
bool updateInfoIsTableInserted(SUpdateInfo *pInfo, int64_t tbUid);
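A hedged sketch of the extended init signature: track late-arriving rows for a 10-second interval with a 1-second watermark (the millisecond precision constant and the literals are illustrative, and the matching destroy call is omitted):
SUpdateInfo *pInfo = updateInfoInit(10000 /*interval*/, TSDB_TIME_PRECISION_MILLI, 1000 /*watermark*/, false /*igUp*/);
if (updateInfoIsUpdated(pInfo, tableId, ts)) {
  // the incoming row touches an already-emitted window, so trigger recalculation
}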

View File

@ -101,6 +101,7 @@ typedef struct SSyncCfg {
int32_t myIndex;
SNodeInfo nodeInfo[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
SyncIndex lastIndex;
int32_t changeVersion;
} SSyncCfg;
typedef struct SFsmCbMeta {
@ -239,19 +240,22 @@ typedef struct SSyncState {
ESyncState state;
bool restored;
bool canRead;
int32_t progress;
SyncTerm term;
int64_t roleTimeMs;
int64_t startTimeMs;
} SSyncState;
int32_t syncInit();
void syncCleanUp();
int64_t syncOpen(SSyncInfo* pSyncInfo);
int32_t syncStart(int64_t rid);
void syncStop(int64_t rid);
void syncPreStop(int64_t rid);
void syncPostStop(int64_t rid);
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak, int64_t* seq);
int32_t syncIsCatchUp(int64_t rid);
int32_t syncInit();
void syncCleanUp();
int64_t syncOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion);
int32_t syncStart(int64_t rid);
void syncStop(int64_t rid);
void syncPreStop(int64_t rid);
void syncPostStop(int64_t rid);
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak, int64_t* seq);
int32_t syncCheckMember(int64_t rid);
int32_t syncIsCatchUp(int64_t rid);
ESyncRole syncGetRole(int64_t rid);
int32_t syncProcessMsg(int64_t rid, SRpcMsg* pMsg);
int32_t syncReconfig(int64_t rid, SSyncCfg* pCfg);
@ -269,6 +273,8 @@ SSyncState syncGetState(int64_t rid);
void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet);
const char* syncStr(ESyncState state);
int32_t syncNodeGetConfig(int64_t rid, SSyncCfg *cfg);
#ifdef __cplusplus
}
#endif

View File

@ -300,6 +300,25 @@ void tfsClosedir(STfsDir *pDir);
*/
int32_t tfsGetMonitorInfo(STfs *pTfs, SMonDiskInfo *pInfo);
/**
* @brief Check if disk space is available at the given level
*
* @param pTfs The fs object.
* @param level The level
* @return bool
*/
bool tfsDiskSpaceAvailable(STfs *pTfs, int32_t level);
/**
* @brief Check if disk space is sufficient on the given disk of the level
*
* @param pTfs The fs object.
* @param level the level
* @param disk the disk
* @return bool
*/
bool tfsDiskSpaceSufficient(STfs *pTfs, int32_t level, int32_t disk);
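A small sketch of how a writer might use the new checks before accepting data, assuming level 0 is the primary storage tier:
if (!tfsDiskSpaceAvailable(pTfs, 0 /*level*/)) {
  // the whole primary tier is out of space: reject new writes
} else if (!tfsDiskSpaceSufficient(pTfs, 0 /*level*/, 0 /*disk*/)) {
  // the first disk of the tier is low on space: pick another disk on this level
}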
#ifdef __cplusplus
}
#endif

View File

@ -83,7 +83,7 @@ void taosRemoveDir(const char *dirname);
bool taosDirExist(const char *dirname);
int32_t taosMkDir(const char *dirname);
int32_t taosMulMkDir(const char *dirname);
int32_t taosMulModeMkDir(const char *dirname, int mode);
int32_t taosMulModeMkDir(const char *dirname, int mode, bool checkAccess);
void taosRemoveOldFiles(const char *dirname, int32_t keepDays);
int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen);
int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen);
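taosMulModeMkDir() gains a checkAccess flag so callers can request an access check when the directory already exists. Sketch; the path and mode are placeholders:
// create the directory tree with mode 0755; verify access if it already exists
if (taosMulModeMkDir("/var/lib/taos/tmp", 0755, true) != 0) {
  // creation or access check failed
}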

View File

@ -76,7 +76,7 @@ int32_t taosUnLockFile(TdFilePtr pFile);
int32_t taosUmaskFile(int32_t maskVal);
int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime);
int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime);
int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno);
int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime);
bool taosCheckExistFile(const char *pathname);
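taosStatFile() gains an atime output parameter alongside size and mtime. Call shape below; whether a NULL out-parameter is accepted is an assumption, not confirmed by this diff:
int64_t size = 0;
int32_t mtime = 0, atime = 0;
if (taosStatFile("/tmp/example.dat", &size, &mtime, &atime) == 0) {
  // size/mtime/atime now describe the file
}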

View File

@ -32,6 +32,8 @@ void taosSeedRand(uint32_t seed);
uint32_t taosRand(void);
uint32_t taosRandR(uint32_t* pSeed);
void taosRandStr(char* str, int32_t size);
void taosRandStr2(char* str, int32_t size);
uint32_t taosSafeRand(void);
#ifdef __cplusplus
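taosRandStr2() and taosSafeRand() join the random helpers. A quick sketch; the buffer size is arbitrary and the exact guarantees of taosSafeRand() are assumed, not stated here:
char buf[33] = {0};
taosRandStr2(buf, sizeof(buf) - 1);  // fill buf with random printable characters
uint32_t r = taosSafeRand();         // random value from the "safe" generator (assumed semantics)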

View File

@ -90,6 +90,11 @@ int8_t taosStr2Int8(const char *str, char **pEnd, int32_t radix);
uint8_t taosStr2UInt8(const char *str, char **pEnd, int32_t radix);
double taosStr2Double(const char *str, char **pEnd);
float taosStr2Float(const char *str, char **pEnd);
int32_t taosHex2Ascii(const char *z, uint32_t n, void** data, uint32_t* size);
int32_t taosAscii2Hex(const char *z, uint32_t n, void** data, uint32_t* size);
//int32_t taosBin2Ascii(const char *z, uint32_t n, void** data, uint32_t* size);
bool isHex(const char* z, uint32_t n);
bool isValidateHex(const char* z, uint32_t n);
#ifdef __cplusplus
}
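The new hex helpers convert between a hex literal and raw bytes; judging from the void**/uint32_t* out-parameters, taosHex2Ascii() allocates the output buffer (an assumption). Sketch:
const char *hex  = "1A2B3C";
uint32_t    n    = 6;            // strlen(hex)
void       *data = NULL;
uint32_t    size = 0;
if (isValidateHex(hex, n) && taosHex2Ascii(hex, n, &data, &size) == 0) {
  // data holds `size` decoded bytes; assumed to be released with taosMemoryFree
  taosMemoryFree(data);
}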

View File

@ -95,6 +95,8 @@ struct tm *taosLocalTime(const time_t *timep, struct tm *result, char *buf);
struct tm *taosLocalTimeNolock(struct tm *result, const time_t *timep, int dst);
time_t taosTime(time_t *t);
time_t taosMktime(struct tm *timep);
int64_t user_mktime64(const uint32_t year, const uint32_t mon, const uint32_t day, const uint32_t hour,
const uint32_t min, const uint32_t sec, int64_t time_zone);
#ifdef __cplusplus
}
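user_mktime64() turns a broken-down date into an epoch value with an explicit time_zone offset. The unit of time_zone, whether mon is 1-based, and the return unit are not stated in this hunk, so the sketch only shows the call shape with illustrative values:
// 2023-09-01 20:46:29 with a +8h offset expressed in seconds (all values illustrative)
int64_t t = user_mktime64(2023, 9, 1, 20, 46, 29, 8 * 3600);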

View File

@ -191,6 +191,7 @@ int32_t* taosGetErrno();
// #define TSDB_CODE_MND_FAILED_TO_CREATE_DIR TAOS_DEF_ERROR_CODE(0, 0x0313) // 2.x
// #define TSDB_CODE_MND_FAILED_TO_INIT_STEP TAOS_DEF_ERROR_CODE(0, 0x0314) // 2.x
#define TSDB_CODE_MND_USER_DISABLED TAOS_DEF_ERROR_CODE(0, 0x0315)
#define TSDB_CODE_MND_INVALID_PLATFORM TAOS_DEF_ERROR_CODE(0, 0x0316)
// mnode-sdb
#define TSDB_CODE_SDB_OBJ_ALREADY_THERE TAOS_DEF_ERROR_CODE(0, 0x0320) // internal
@ -517,6 +518,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_QRY_JOB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x072F)
#define TSDB_CODE_QRY_QWORKER_QUIT TAOS_DEF_ERROR_CODE(0, 0x0730)
#define TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR TAOS_DEF_ERROR_CODE(0, 0x0731)
#define TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x0732)
// grant
#define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800)
@ -707,12 +709,14 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_OPTR_USAGE TAOS_DEF_ERROR_CODE(0, 0x2667)
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668)
#define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED TAOS_DEF_ERROR_CODE(0, 0x2669)
#define TSDB_CODE_PAR_INVALID_VARBINARY TAOS_DEF_ERROR_CODE(0, 0x266A)
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
//planner
#define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700)
#define TSDB_CODE_PLAN_EXPECTED_TS_EQUAL TAOS_DEF_ERROR_CODE(0, 0x2701)
#define TSDB_CODE_PLAN_NOT_SUPPORT_CROSS_JOIN TAOS_DEF_ERROR_CODE(0, 0x2702)
#define TSDB_CODE_PLAN_NOT_SUPPORT_JOIN_COND TAOS_DEF_ERROR_CODE(0, 0x2703)
//function
#define TSDB_CODE_FUNC_FUNTION_ERROR TAOS_DEF_ERROR_CODE(0, 0x2800)

Some files were not shown because too many files have changed in this diff.