Merge branch '3.0' into mark/doc-3.0

This commit is contained in:
WANG MINGMING 2023-08-30 15:29:49 +08:00 committed by GitHub
commit 6d8757115f
412 changed files with 27524 additions and 9862 deletions

View File

@ -0,0 +1,19 @@
# apr-util
ExternalProject_Add(aprutil-1
URL https://dlcdn.apache.org/apr/apr-util-1.6.3.tar.gz
URL_HASH SHA256=2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/apache/apr-util.git
#GIT_TAG 1.5.4
SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1
#CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)

View File

@ -0,0 +1,19 @@
# apr
ExternalProject_Add(apr-1
URL https://dlcdn.apache.org/apr/apr-1.7.4.tar.gz
URL_HASH SHA256=a4137dd82a185076fa50ba54232d920a17c6469c30b0876569e1c2a05ff311d9
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/apache/apr.git
#GIT_TAG 1.5.2
SOURCE_DIR "${TD_CONTRIB_DIR}/apr"
BUILD_IN_SOURCE TRUE
UPDATE_DISCONNECTED TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --enable-shared=no
#CONFIGURE_COMMAND ./configure
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)

View File

@ -1,5 +1,4 @@
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE ON)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
@ -78,6 +77,12 @@ ELSE ()
SET(TD_TAOS_TOOLS TRUE)
ENDIF ()
IF (${TD_WINDOWS})
SET(TAOS_LIB taos_static)
ELSE ()
SET(TAOS_LIB taos)
ENDIF ()
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")

View File

@ -125,6 +125,16 @@ option(
ON
)
IF(${TD_LINUX})
option(
BUILD_WITH_COS
"If build with cos"
ON
)
ENDIF ()
option(
BUILD_WITH_SQLITE
"If build with sqlite"

View File

@ -0,0 +1,12 @@
# cos
ExternalProject_Add(cos
GIT_REPOSITORY https://github.com/tencentyun/cos-c-sdk-v5.git
GIT_TAG v5.0.16
SOURCE_DIR "${TD_CONTRIB_DIR}/cos-c-sdk-v5"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)

View File

@ -0,0 +1,17 @@
# curl
ExternalProject_Add(curl
URL https://curl.se/download/curl-8.2.1.tar.gz
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/curl/curl.git
#GIT_TAG curl-7_88_1
SOURCE_DIR "${TD_CONTRIB_DIR}/curl"
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd
#CONFIGURE_COMMAND ./configure --without-ssl
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)

View File

@ -0,0 +1,14 @@
# mxml
ExternalProject_Add(mxml
GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git
GIT_TAG release-2.10
SOURCE_DIR "${TD_CONTRIB_DIR}/mxml"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --enable-shared=no
#CONFIGURE_COMMAND ./configure
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)

View File

@ -6,6 +6,39 @@ function(cat IN_FILE OUT_FILE)
file(APPEND ${OUT_FILE} "${CONTENTS}")
endfunction(cat IN_FILE OUT_FILE)
if(${TD_LINUX})
set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
if(${BUILD_WITH_COS})
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
endif(${BUILD_WITH_COS})
configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
if(${BUILD_WITH_COS})
cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
endif(${BUILD_WITH_COS})
configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
endif(${TD_LINUX})
set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@ -122,6 +155,16 @@ if(${BUILD_WITH_SQLITE})
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_SQLITE})
# cos
if(${BUILD_WITH_COS})
#cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_COS)
endif(${BUILD_WITH_COS})
# lucene
if(${BUILD_WITH_LUCENE})
cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@ -347,6 +390,31 @@ if (${BUILD_WITH_ROCKSDB})
endif()
endif()
# cos
if(${BUILD_WITH_COS})
if(${TD_LINUX})
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)
#ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
option(ENABLE_TEST "Enable the tests" OFF)
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
MESSAGE("$ENV{HOME}/.cos-local.1/include")
set(CMAKE_BUILD_TYPE debug)
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
set(CMAKE_PROJECT_NAME cos_c_sdk)
add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL)
target_include_directories(
cos_c_sdk
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cos-c-sdk-v5/cos_c_sdk>
)
set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
endif(${TD_LINUX})
endif(${BUILD_WITH_COS})
# lucene
# To support build on ubuntu: sudo apt-get install libboost-all-dev
if(${BUILD_WITH_LUCENE})

View File

@ -3,6 +3,11 @@ if(${BUILD_WITH_ROCKSDB})
add_subdirectory(rocksdb)
endif(${BUILD_WITH_ROCKSDB})
# cos
if(${BUILD_WITH_COS})
add_subdirectory(cos)
endif(${BUILD_WITH_COS})
if(${BUILD_WITH_LUCENE})
add_subdirectory(lucene)
endif(${BUILD_WITH_LUCENE})

View File

@ -0,0 +1,49 @@
add_executable(cosTest "")
target_sources(cosTest
PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/main.c"
)
#find_path(APR_INCLUDE_DIR apr-1/apr_time.h)
#find_path(APR_UTIL_INCLUDE_DIR apr/include/apr-1/apr_md5.h)
#find_path(MINIXML_INCLUDE_DIR mxml.h)
#find_path(CURL_INCLUDE_DIR curl/curl.h)
#include_directories (${MINIXML_INCLUDE_DIR})
#include_directories (${CURL_INCLUDE_DIR})
FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
#FIND_PROGRAM(APU_CONFIG_BIN NAMES apu-config apu-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
IF (APR_CONFIG_BIN)
EXECUTE_PROCESS(
COMMAND ${APR_CONFIG_BIN} --includedir
OUTPUT_VARIABLE APR_INCLUDE_DIR
OUTPUT_STRIP_TRAILING_WHITESPACE
)
ENDIF()
#IF (APU_CONFIG_BIN)
# EXECUTE_PROCESS(
# COMMAND ${APU_CONFIG_BIN} --includedir
# OUTPUT_VARIABLE APR_UTIL_INCLUDE_DIR
# OUTPUT_STRIP_TRAILING_WHITESPACE
# )
#ENDIF()
include_directories (${APR_INCLUDE_DIR})
#include_directories (${APR_UTIL_INCLUDE_DIR})
target_include_directories(
cosTest
PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
)
#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
#find_library(MINIXML_LIBRARY mxml)
#find_library(CURL_LIBRARY curl)
target_link_libraries(cosTest cos_c_sdk)
target_link_libraries(cosTest apr-1)
target_link_libraries(cosTest aprutil-1)
target_link_libraries(cosTest mxml)
target_link_libraries(cosTest curl)

contrib/test/cos/main.c (new file, 3095 lines)

File diff suppressed because it is too large

View File

@ -35,11 +35,12 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
:::note
- All the data in `tag_set` will be converted to NCHAR type automatically.
- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double.
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h).
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
- All the data in `tag_set` will be converted to NCHAR type automatically
- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h)
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur. (smlDataFormat defaults to false since version 3.0.1.3 and is deprecated since 3.0.3.0)
:::
For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)

View File

@ -34,6 +34,7 @@ meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
## Examples
@ -68,11 +69,11 @@ Database changed.
taos> show stables;
name |
=================================
meters.current |
meters.voltage |
meters_current |
meters_voltage |
Query OK, 2 row(s) in set (0.002544s)
taos> select tbname, * from `meters.current`;
taos> select tbname, * from `meters_current`;
tbname | _ts | _value | groupid | location |
==================================================================================================================================
t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles |
@ -87,5 +88,5 @@ Query OK, 4 row(s) in set (0.005399s)
If you want to query the data of `location=California.LosAngeles groupid=3`, here is the query SQL:
```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3;
```

View File

@ -49,6 +49,7 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
- In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table name if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
:::
## Examples
@ -83,11 +84,11 @@ Database changed.
taos> show stables;
name |
=================================
meters.current |
meters.voltage |
meters_current |
meters_voltage |
Query OK, 2 row(s) in set (0.001954s)
taos> select * from `meters.current`;
taos> select * from `meters_current`;
_ts | _value | groupid | location |
===================================================================================================================
2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | California.SanFrancisco |
@ -100,5 +101,5 @@ Query OK, 2 row(s) in set (0.004076s)
If you want to query the data of `"tags": {"location": "California.LosAngeles", "groupid": 1}`, here is the query SQL:
```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3;
```

View File

@ -52,7 +52,7 @@ CREATE TABLE d1004 USING meters TAGS ("California.LosAngeles", 3);
### Create a Stream
```sql
create stream current_stream into current_stream_output_stb as select _wstart as start, _wend as end, max(current) as max_current from meters where voltage <= 220 interval (5s);
create stream current_stream trigger at_once into current_stream_output_stb as select _wstart as wstart, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s);
```
### Write Data

View File

@ -53,17 +53,17 @@ The related schemas and APIs in various languages are described as follows:
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
typedef enum tmq_conf_res_t {
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
} tmq_conf_res_t;
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
} tmq_conf_res_t;
typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
} tmq_topic_assignment;
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end; // The last version of wal + 1
} tmq_topic_assignment;
DLL_EXPORT tmq_conf_t *tmq_conf_new();
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
@ -82,21 +82,21 @@ The related schemas and APIs in various languages are described as follows:
DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
DLL_EXPORT int32_t   tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); // Commit the msg's offset + 1
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment);
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
DLL_EXPORT const char *tmq_err2str(int32_t code);DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); // Get current offset of the result
DLL_EXPORT const char *tmq_err2str(int32_t code);
```
The following example is based on the smart meter table described in Data Models. For complete sample code, see the C language section below.
@ -113,7 +113,19 @@ Set<String> subscription() throws SQLException;
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
Set<TopicPartition> assignment() throws SQLException;
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
void close() throws SQLException;
```

docs/en/12-taos-sql/06-select.md (53 changes, Normal file → Executable file)
View File

@ -7,9 +7,9 @@ description: This document describes how to query data in TDengine.
## Syntax
```sql
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE()}
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }
SELECT [DISTINCT] select_list
SELECT [hints] [DISTINCT] [TAGS] select_list
from_clause
[WHERE condition]
[partition_by_clause]
@ -21,6 +21,11 @@ SELECT [DISTINCT] select_list
[LIMIT limit_val [OFFSET offset_val]]
[>> export_file]
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
hint:
BATCH_SCAN | NO_BATCH_SCAN
select_list:
select_expr [, select_expr] ...
@ -70,6 +75,29 @@ order_expr:
{expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
```
## Hints
Hints are a means of user control over query optimization for individual statements. A hint is ignored automatically when it is not applicable to the current query statement. The specific rules are as follows:
- Hints syntax starts with `/*+` and ends with `*/`; spaces are allowed before and after.
- Hints syntax can only follow the SELECT keyword.
- A hints clause can contain multiple hints separated by spaces; when hints conflict or are identical, the first one takes effect.
- When one hint in the hints clause is erroneous, the valid hints before the error still take effect, and the erroneous and subsequent hints are ignored.
- hint_param_list is the parameter list of each hint and varies by hint.
The list of currently supported Hints is as follows:
| **Hint** | **Params** | **Comment** | **Scope** |
| :-----------: | -------------- | -------------------------- | -------------------------- |
| BATCH_SCAN | None | Batch table scan | JOIN statements on supertables |
| NO_BATCH_SCAN | None | Sequential table scan | JOIN statements on supertables |
For example:
```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
```
## Lists
A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list.
@ -167,7 +195,7 @@ The following SQL statement returns the number of subtables within the meters su
SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
```
In the preceding two statements, only tags can be used as filtering conditions in the WHERE clause. For example:
In the preceding two statements, only tags can be used as filtering conditions in the WHERE clause.
**\_QSTART and \_QEND**
@ -197,6 +225,14 @@ The \_IROWTS pseudocolumn can only be used with INTERP function. This pseudocolu
select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
```
### TAGS Query
When only tag columns are specified in the select list, the TAGS keyword returns the tag columns of all child tables. One row of tag columns is returned for each child table.
```sql
SELECT TAGS tag_name [, tag_name ...] FROM stb_name
```
## Query Objects
`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.
@ -209,8 +245,7 @@ You can perform INNER JOIN statements based on the primary key. The following co
3. For supertables, the ON condition must be equivalent to the primary key. In addition, the tag columns of the tables on which the INNER JOIN is performed must have a one-to-one relationship. You cannot specify an OR condition.
4. The tables that are included in a JOIN clause must be of the same type (supertable, standard table, or subtable).
5. You can include subqueries before and after the JOIN keyword.
6. You cannot include more than ten tables in a JOIN clause.
7. You cannot include a FILL clause and a JOIN clause in the same statement.
6. You cannot include a FILL clause and a JOIN clause in the same statement.
## GROUP BY
@ -301,6 +336,12 @@ SELECT TODAY();
SELECT TIMEZONE();
```
### Obtain Current User
```sql
SELECT CURRENT_USER();
```
## Regular Expression
### Syntax
@ -355,7 +396,7 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F
## JOIN
TDengine supports the `INNER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as the requirement of a timestamp-based primary key is met, `INNER JOIN` can be performed between normal tables, subtables, supertables and subqueries at will, and there is no limit on the number of tables.
TDengine supports the `INNER JOIN` based on the timestamp primary key, that is, the `JOIN` condition must contain the timestamp primary key. As long as the requirement of a timestamp-based primary key is met, `INNER JOIN` can be performed between normal tables, subtables, supertables and subqueries at will, and there is no limit on the number of tables; the primary key condition and other conditions must be combined with the `AND` operator.
For standard tables:

View File

@ -1275,6 +1275,14 @@ SELECT SERVER_STATUS();
**Description**: The server status.
### CURRENT_USER
```sql
SELECT CURRENT_USER();
```
**Description**: Returns the current user.
## Geometry Functions

View File

@ -168,3 +168,11 @@ All [scalar functions](../function/#scalar-functions) are available in stream pr
- [unique](../function/#unique)
- [mode](../function/#mode)
## Pause and Resume a Stream
1. Pause a stream
`PAUSE STREAM [IF EXISTS] stream_name;`
If IF EXISTS is not specified and the stream does not exist, an error is reported; if IF EXISTS is specified and the stream does not exist, success is returned; if the stream exists, all of its stream tasks are paused.
2. Resume a stream
`RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;`
If IF EXISTS is not specified and the stream does not exist, an error is reported; if IF EXISTS is specified and the stream does not exist, success is returned; if the stream exists, all of its stream tasks are resumed. If IGNORE UNTREATED is specified, data written while the stream was paused is ignored when the stream resumes.

View File

@ -178,7 +178,7 @@ The following list shows all reserved keywords:
- MATCH
- MAX_DELAY
- MAX_SPEED
- BWLIMIT
- MAXROWS
- MERGE
- META

View File

@ -22,6 +22,14 @@ SHOW CLUSTER;
Shows information about the current cluster.
## SHOW CLUSTER ALIVE
```sql
SHOW CLUSTER ALIVE;
```
It is used to check whether the cluster is available. Return values: 0 means unavailable; 1 means fully available; 2 means partially available (some dnodes are offline while the other dnodes remain available).
## SHOW CONNECTIONS
```sql

View File

@ -4,7 +4,7 @@ sidebar_label: Access Control
description: This document describes how to manage users and permissions in TDengine.
---
This document describes how to manage permissions in TDengine.
User and access control is a distinguishing feature of TDengine Enterprise Edition. This section demonstrates only the most fundamental user and access control functionality. To get full knowledge of user and access control, please contact the TDengine team.
## Create a User

View File

@ -19,6 +19,9 @@ index_option:
functions:
function [, function] ...
```
### Tag Indexing
See [Tag Index](../tag-index).
### SMA Indexing

View File

@ -17,7 +17,7 @@ You can use the SHOW CONNECTIONS statement to find the conn_id.
## Terminate a Query
```sql
KILL QUERY kill_id;
KILL QUERY 'kill_id';
```
You can use the SHOW QUERIES statement to find the kill_id.

View File

@ -135,7 +135,7 @@ The following describes the basic API, synchronous API, asynchronous API, subscr
The base API is used to do things like create database connections and provide a runtime environment for the execution of other APIs.
- `void taos_init()`
- `int taos_init()`
Initializes the runtime environment. If this API is not called explicitly, the driver will call it automatically when `taos_connect()` is invoked, so programs generally do not need to call it manually. A usage sketch follows.
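Since `taos_init()` now returns an int, callers can check the result explicitly. Below is a minimal sketch; the assumption that a non-zero return indicates failure follows the usual TDengine error-code convention and is not spelled out in this change.

```c
#include <stdio.h>
#include <stdlib.h>
#include "taos.h"

int main(void) {
  // Assumption: a non-zero return code indicates initialization failure.
  if (taos_init() != 0) {
    fprintf(stderr, "failed to initialize the TDengine client runtime\n");
    return EXIT_FAILURE;
  }
  // ... connect and run queries here ...
  taos_cleanup();  // release client resources on shutdown
  return 0;
}
```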
@ -168,6 +168,12 @@ The base API is used to do things like create database connections and provide a
:::
- `TAOS *taos_connect_auth(const char *host, const char *user, const char *auth, const char *db, uint16_t port)`
The function is the same as taos_connect, except that the pass parameter is replaced by auth; all other parameters are the same as those of taos_connect. A call sketch follows.
- auth: the 32-character lowercase MD5 hex digest of the raw password
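A minimal sketch of the call; the digest string below is a placeholder, not a real hash:

```c
#include <stdio.h>
#include "taos.h"

int main(void) {
  // Placeholder: substitute the 32-character lowercase MD5 hex digest
  // of the actual raw password.
  const char *auth = "0123456789abcdef0123456789abcdef";
  TAOS *taos = taos_connect_auth("localhost", "root", auth, NULL, 6030);
  if (taos == NULL) {
    fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
    return 1;
  }
  taos_close(taos);
  return 0;
}
```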
- `char *taos_get_server_info(TAOS *taos)`
Get server-side version information.
@ -184,6 +190,14 @@ The base API is used to do things like create database connections and provide a
- If len is less than the space required to store the db name (including the terminating '\0'), an error is returned, and the value written to database is truncated and ends with '\0'.
- If len is greater than or equal to the required space (including the terminating '\0'), 0 is returned, and the db name, terminated with '\0', is written to database (see the sketch below).
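A sketch of the calling pattern; the exact signature `int taos_get_current_db(TAOS *taos, char *database, int len, int *required)` and the `required` out-parameter name are assumptions inferred from the description above:

```c
// Assumption: `taos` is an open connection and the signature is as described.
char db[64];
int  required = 0;
if (taos_get_current_db(taos, db, sizeof(db), &required) == 0) {
  printf("current database: %s\n", db);   // db is '\0'-terminated
} else {
  printf("buffer too small, %d bytes required\n", required);
}
```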
- `int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type)`
Set the event callback function; a sketch follows this list.
- fp: event callback function pointer. Declaration: `typedef void (*__taos_notify_fn_t)(void *param, void *ext, int type)`. param is a user-defined parameter, ext is an extended parameter (dependent on the event type; for TAOS_NOTIFY_PASSVER it carries the user password version), and type is the event type
- param: user-defined parameter
- type: event type. Value range: 1) TAOS_NOTIFY_PASSVER: user password changed
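A minimal sketch of registering the callback (the connection parameters are placeholders; the assumption that ext points to the new password version for TAOS_NOTIFY_PASSVER follows the description above):

```c
#include <stdio.h>
#include "taos.h"

// Invoked by the driver when the registered event fires.
static void on_notify(void *param, void *ext, int type) {
  if (type == TAOS_NOTIFY_PASSVER) {
    // Assumption: ext carries the new user password version.
    printf("[%s] password version changed to %d\n",
           (const char *)param, *(int32_t *)ext);
  }
}

int main(void) {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (taos == NULL) return 1;

  static const char *tag = "conn-1";  // user-defined parameter, passed back as param
  taos_set_notify_cb(taos, on_notify, (void *)tag, TAOS_NOTIFY_PASSVER);

  // ... normal workload; the callback fires if the password changes elsewhere ...
  taos_close(taos);
  return 0;
}
```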
- `void taos_close(TAOS *taos)`
Closes the connection, where `taos` is the handle returned by `taos_connect()`.
@ -307,21 +321,20 @@ The specific functions related to the interface are as follows (see also the [pr
Parse a SQL command, and bind the parsed result and parameter information to `stmt`. If the parameter length is greater than 0, use this parameter as the length of the SQL command. If it is equal to 0, the length of the SQL command will be determined automatically.
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind)`
Not as efficient as `taos_stmt_bind_param_batch()`, but can support non-INSERT type SQL statements.
To bind parameters, bind points to an array (representing the row of data to be bound), making sure that the number and order of the elements in this array are the same as the parameters in the SQL statement. TAOS_MULTI_BIND is used similarly to MYSQL_BIND in MySQL, as defined below, with a usage sketch after the definition.
```c
// Old definition (removed in this commit):
typedef struct TAOS_BIND {
  int         buffer_type;
  void       *buffer;
  uintptr_t   buffer_length;  // not in use
  uintptr_t  *length;
  int        *is_null;
  int         is_unsigned;    // not in use
  int        *error;          // not in use
} TAOS_BIND;

// New definition:
typedef struct TAOS_MULTI_BIND {
  int        buffer_type;
  void      *buffer;
  uintptr_t  buffer_length;
  uint32_t  *length;
  char      *is_null;
  int        num;
} TAOS_MULTI_BIND;
```
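For illustration, a sketch of binding the two parameters of a hypothetical `INSERT INTO d0 VALUES(?, ?)` with the new structure (the statement and the helper name are assumptions, not part of this change):

```c
#include <string.h>
#include "taos.h"

// Hypothetical helper: bind (timestamp, current) on an already-prepared stmt.
static int bind_one_row(TAOS_STMT *stmt, int64_t ts, float current) {
  uint32_t ts_len  = sizeof(int64_t);
  uint32_t cur_len = sizeof(float);
  char     no_null = 0;
  TAOS_MULTI_BIND params[2];
  memset(params, 0, sizeof(params));

  params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer        = &ts;
  params[0].buffer_length = sizeof(int64_t);
  params[0].length        = &ts_len;
  params[0].is_null       = &no_null;
  params[0].num           = 1;  // one row

  params[1].buffer_type   = TSDB_DATA_TYPE_FLOAT;
  params[1].buffer        = &current;
  params[1].buffer_length = sizeof(float);
  params[1].length        = &cur_len;
  params[1].is_null       = &no_null;
  params[1].num           = 1;

  return taos_stmt_bind_param(stmt, params);
}
```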
- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`
@ -329,7 +342,7 @@ The specific functions related to the interface are as follows (see also the [pr
(Available in 2.1.1.0 and later versions, only supported for replacing parameter values in INSERT statements)
When the table name in the SQL command uses a `?` placeholder, you can use this function to bind a specific table name.
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_MULTI_BIND* tags)`
(Available in 2.1.2.0 and later versions, only supported for replacing parameter values in INSERT statements)
When the table name and TAGS in the SQL command both use `?`, you can use this function to bind the specific table name and the specific TAGS values. The most typical usage scenario is an INSERT statement that uses the automatic table creation function (the current version does not support specifying specific TAGS columns). The number of columns in the TAGS parameter needs to be the same as the number of TAGS required in the SQL command.
@ -358,6 +371,14 @@ The specific functions related to the interface are as follows (see also the [pr
Execute the prepared statement. Currently, a statement can only be executed once.
- `int taos_stmt_affected_rows(TAOS_STMT *stmt)`
Gets the number of rows affected by executing bind statements multiple times.
- `int taos_stmt_affected_rows_once(TAOS_STMT *stmt)`
Gets the number of rows affected by executing a bind statement once.
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
Gets the result set of a statement. Use the result set in the same way as in the non-parametric call. When finished, `taos_free_result()` should be called on this result set to free resources.
@ -378,7 +399,7 @@ In addition to writing data using the SQL method or the parameter binding API, w
- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)`
**Function description**
This interface writes the text data of the line protocol to TDengine.
- This interface writes the text data of the line protocol to TDengine.
**Parameter description**
- taos: database connection, established by the `taos_connect()` function.
@ -387,12 +408,13 @@ In addition to writing data using the SQL method or the parameter binding API, w
- protocol: the protocol type of the lines, used to identify the text data format.
- precision: precision string for the timestamp in the text data.
**return value**
TAOS_RES structure; the application can get the error message by using `taos_errstr()` and the error code by using `taos_errno()`.
**Return value**
- TAOS_RES structure; the application can get the error message by using `taos_errstr()` and the error code by using `taos_errno()`.
In some cases, the returned TAOS_RES is `NULL`, and it is still possible to call `taos_errno()` to safely get the error code information.
The returned TAOS_RES needs to be freed by the caller in order to avoid memory leaks.
**Description**
The protocol type is enumerated and contains the following three formats.
- TSDB_SML_LINE_PROTOCOL: InfluxDB line protocol (Line Protocol)
@ -427,3 +449,90 @@ In addition to writing data using the SQL method or the parameter binding API, w
- The _raw interfaces pass the data through the lines pointer and the length len, which solves the problem of data containing '\0' being truncated by the original interfaces. The totalRows pointer returns the number of parsed data rows.
- The _ttl interfaces accept a ttl parameter to control the TTL expiration time of the table.
- The _reqid interfaces can trace the entire call chain by passing the reqid parameter (see the sketch following this list).
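A minimal end-to-end sketch using the plain `taos_schemaless_insert()` variant (the server address, database, and sample line are placeholders):

```c
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (taos == NULL) return 1;

  // One InfluxDB line-protocol record with a nanosecond timestamp.
  char *lines[] = {
      "meters,location=California.LosAngeles,groupid=2 "
      "current=13.4,voltage=223,phase=0.29 1648432611249000000"};

  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_NANO_SECONDS);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);  // the returned TAOS_RES must be freed by the caller
  taos_close(taos);
  return 0;
}
```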
### Subscription API
- `int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment)`
- `void tmq_free_assignment(tmq_topic_assignment* pAssignment)`
The tmq_topic_assignment structure is defined as follows:
```c
typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
} tmq_topic_assignment;
```
**Function description**
- tmq_get_topic_assignment gets the current vgroup information of this consumer
**Parameter description**
- numOfAssignment: the number of vgroups assigned to this consumer
- assignment: the vgroup information; it must be freed with tmq_free_assignment
**Return value**
- Zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
- `int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
**Function description**
- Get the committed offset
**Return value**
- The committed offset. -2147467247 means there is no committed value; other values less than 0 indicate failure
- `int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg)`
- `void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param)`
- `int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
- `void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param)`
**Function description**
- The commit interfaces are divided into two types, each with synchronous and asynchronous variants:
- The first type commits based on a message, committing the progress contained in the message; if NULL is passed for the message, the current progress of all vgroups consumed by this consumer is committed: tmq_commit_sync/tmq_commit_async
- The second type commits based on the offset of one vgroup of one topic: tmq_commit_offset_sync/tmq_commit_offset_async
**Parameter description**
- msg: the consumed message; if NULL is passed, the current progress of all vgroups consumed by this consumer is committed
**Return value**
- Zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
- `int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
**Function description**
- Obtain the current consumption position, which is the position right after the last consumed data
**Return value**
- The current consumption position; values less than 0 indicate failure, and the error message can be obtained through `char *tmq_err2str(int32_t code)`
- `int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
**Function description**
- Set the offset in one vgroup of a certain topic at which the consumer starts consuming
**Return value**
- Zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
- `int64_t tmq_get_vgroup_offset(TAOS_RES* res)`
**Function description**
- Obtain the starting offset of the consumed data
**Parameter description**
- res: the consumed message
**Return value**
- The starting offset of the consumed data; values less than 0 indicate failure, and the error message can be obtained through `char *tmq_err2str(int32_t code)`
- `int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics)`
**Function description**
- Obtain the list of topics subscribed by the consumer
**Parameter description**
- topics: the list of subscribed topics; it must be freed with tmq_list_destroy
**Return value**
- Zero on success, non-zero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
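Putting several of these calls together, a sketch of rewinding every assigned vgroup of one topic to its earliest offset (the topic name is a placeholder and `tmq` is assumed to be an already-subscribed consumer):

```c
#include <stdio.h>
#include "taos.h"

// Hypothetical helper: seek every vgroup of `topic` back to its earliest offset.
static void rewind_topic(tmq_t *tmq, const char *topic) {
  tmq_topic_assignment *assignment = NULL;
  int32_t num = 0;

  int32_t code = tmq_get_topic_assignment(tmq, topic, &assignment, &num);
  if (code != 0) {
    fprintf(stderr, "get assignment failed: %s\n", tmq_err2str(code));
    return;
  }
  for (int32_t i = 0; i < num; i++) {
    code = tmq_offset_seek(tmq, topic, assignment[i].vgId, assignment[i].begin);
    if (code != 0) {
      fprintf(stderr, "seek vgId %d failed: %s\n",
              assignment[i].vgId, tmq_err2str(code));
    }
  }
  tmq_free_assignment(assignment);  // assignment must be freed by the caller
}
```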

View File

@ -36,6 +36,7 @@ REST connection supports all platforms that can run Java.
| taos-jdbcdriver version | major changes | TDengine version |
| :---------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------: |
| 3.2.5 | Subscription: added the committed() and assignment() methods | 3.1.0.3 or later |
| 3.2.4 | Subscription: added the enable.auto.commit parameter and the unsubscribe() method for WebSocket connections | - |
| 3.2.3 | Fixed resultSet data parsing failure in some cases | - |
| 3.2.2 | Subscription: added the seek function | 3.0.5.0 or later |
@ -1019,14 +1020,19 @@ while(true) {
#### Specify Subscription Offset
```java
// get topicPartition
Set<TopicPartition> assignment() throws SQLException;
// get offset
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
// Overrides the fetch offsets that the consumer will use on the next poll(timeout).
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
```
Example usage is as follows.
@ -1052,6 +1058,18 @@ try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
}
```
#### Commit offset
If `enable.auto.commit` is false, the offset can be committed manually.
```java
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
// async commit is only supported for the JNI connection
void commitAsync(OffsetCommitCallback<V> callback) throws SQLException;
void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback<V> callback) throws SQLException;
```
#### Close subscriptions
```java

View File

@ -30,6 +30,10 @@ The source code of `TDengine.Connector` is hosted on [GitHub](https://github.com
The supported platforms are the same as those supported by the TDengine client driver.
:::note
Please note that TDengine no longer supports 32-bit Windows.
:::
## Version support
Please refer to [version support list](/reference/connector#version-support)

View File

@ -102,6 +102,8 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-L, --loose-mode Use loose mode if the table name and column name
use letter and number only. Default is NOT.
-n, --no-escape No escape char '`'. Default is using it.
-Q, --dot-replace Replace dot character with underline character in
the table name.
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
8.
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service

View File

@ -81,6 +81,14 @@ For example:
taos -h h1.taos.com -s "use db; show tables;"
```
## Export query results to a file
- You can use ">>" to export query results to a file; the syntax is like `select * from table >> file`. If only a file name is given without a path, the file is generated under the current working directory of the TDengine CLI.
## Import data from CSV file
- You can use `insert into table_name file 'fileName'` to import the data from the specified file into the specified table. For example, `insert into d0 file '/root/d0.csv';` means importing the data in file "/root/d0.csv" into table "d0". If only a file name is given without a path, the file must be located under the current working directory of the TDengine CLI.
## TDengine CLI tips
- You can use the up and down keys to iterate through the history of commands entered
@ -89,3 +97,5 @@ taos -h h1.taos.com -s "use db; show tables;"
- Execute `RESET QUERY CACHE` to clear the local cache of the table schema
- Execute SQL statements in batches. You can store a series of shell commands (ending with ;, one line for each SQL command) in a script file and execute the command `source <file-name>` in the TDengine CLI to execute all SQL commands in that file automatically
- Enter `q` to exit TDengine CLI

View File

@ -74,7 +74,7 @@ grafana-cli plugins install tdengine-datasource
sudo -u grafana grafana-cli plugins install tdengine-datasource
```
You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:
You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:
```bash
GF_VERSION=3.3.1

View File

@ -10,6 +10,14 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
import Release from "/components/ReleaseV3";
## 3.1.0.3
<Release type="tdengine" version="3.1.0.3" />
## 3.1.0.2
<Release type="tdengine" version="3.1.0.2" />
## 3.1.0.0
:::note IMPORTANT

View File

@ -78,6 +78,7 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_VARBINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_GEOMETRY: {
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);

View File

@ -51,7 +51,7 @@ void insertData(TAOS *taos) {
int code = taos_stmt_prepare(stmt, sql, 0);
checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
// bind table name and tags
TAOS_BIND tags[2];
TAOS_MULTI_BIND tags[2];
char *location = "California.SanFrancisco";
int groupId = 2;
tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
@ -144,4 +144,4 @@ int main() {
}
// output:
// successfully inserted 2 rows
// successfully inserted 2 rows

View File

@ -76,6 +76,7 @@ int printRow(char *str, TAOS_ROW row, TAOS_FIELD *fields, int numFields) {
} break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_VARBINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_GEOMETRY: {
int32_t charLen = varDataLen((char *)row[i] - VARSTR_HEADER_SIZE);

View File

@ -58,7 +58,7 @@ void insertData(TAOS *taos) {
int code = taos_stmt_prepare(stmt, sql, 0);
checkErrorCode(stmt, code, "failed to execute taos_stmt_prepare");
// bind table name and tags
TAOS_BIND tags[2];
TAOS_MULTI_BIND tags[2];
char* location = "California.SanFrancisco";
int groupId = 2;
tags[0].buffer_type = TSDB_DATA_TYPE_BINARY;
@ -82,7 +82,7 @@ void insertData(TAOS *taos) {
{1648432611749, 12.6, 218, 0.33},
};
TAOS_BIND values[4];
TAOS_MULTI_BIND values[4];
values[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
values[0].buffer_length = sizeof(int64_t);
values[0].length = &values[0].buffer_length;
@ -138,4 +138,4 @@ int main() {
// output:
// successfully inserted 2 rows
// successfully inserted 2 rows

View File

@ -6,7 +6,14 @@ toc_max_heading_level: 2
TDengine is an open-source, high-performance, cloud-native [time-series database](https://tdengine.com/tsdb/) optimized for IoT, connected vehicles, industrial IoT, finance, IT operations and monitoring, and other scenarios. All TDengine code, including the cluster functionality, is open source under GNU AGPL v3.0. Besides the core time-series database features, TDengine also provides [caching](../develop/cache/), [data subscription](../develop/tmq), [stream processing](../develop/stream), and other capabilities to reduce system complexity as well as development and operations costs.
This chapter introduces the major features, competitive advantages, typical use cases, and benchmark comparisons with other databases, to give you an overall picture of TDengine.
This chapter introduces the major products and features, competitive advantages, typical use cases, and benchmark comparisons with other databases, to give you an overall picture of TDengine.
## Main Products
TDengine has three main products: TDengine Pro (TDengine Enterprise Edition), TDengine Cloud, and TDengine OSS. For their precise definitions, please refer to:
- [TDengine Enterprise Edition](https://www.taosdata.com/tdengine-pro)
- [TDengine Cloud](https://cloud.taosdata.com/?utm_source=menu&utm_medium=webcn)
- [TDengine OSS](https://www.taosdata.com/tdengine-oss)
## Main Features

View File

@ -34,11 +34,12 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
:::note
- All data in tag_set is automatically converted to the NCHAR data type;
- Each data item in field_set must describe its own data type, e.g. 1.2f32 denotes the FLOAT value 1.2; without the type suffix it is treated as DOUBLE;
- timestamp supports multiple time precisions. The precision is specified by a parameter when writing data; six precisions from hour to nanosecond are supported
- All data in tag_set is automatically converted to the NCHAR data type
- Each data item in field_set must describe its own data type, e.g. 1.2f32 denotes the FLOAT value 1.2; without the type suffix it is treated as DOUBLE
- timestamp supports multiple time precisions. The precision is specified by a parameter when writing data; six precisions from hour to nanosecond are supported
- To improve write efficiency, it is assumed by default that the field_set order within one supertable is the same (the first record contains all fields and subsequent records follow that order). If the order differs, set the smlDataFormat parameter to false; otherwise the data will be written in the assumed order and the data in the database will be abnormal. (smlDataFormat defaults to false since 3.0.1.3 and is deprecated since 3.0.3.0.) [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)
- By default, the generated child table name is a unique ID produced by a rule. You can also configure the smlChildTableName parameter in taos.cfg on the client side to use a tag value as the child table name; the tag value must be globally unique. For example, if a tag is named tname and you configure smlChildTableName=tname, then inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the child table cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used when the table is automatically created, and the others are ignored. [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)
:::
For more details, please refer to the [InfluxDB Line Protocol documentation](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and the [TDengine Schemaless Writing Reference Guide](/reference/schemaless/#无模式写入行协议)

View File

@ -67,11 +67,11 @@ Database changed.
taos> SHOW STABLES;
name |
=================================
meters.current |
meters.voltage |
meters_current |
meters_voltage |
Query OK, 2 row(s) in set (0.002544s)
taos> SELECT TBNAME, * FROM `meters.current`;
taos> SELECT TBNAME, * FROM `meters_current`;
tbname | _ts | _value | groupid | location |
==================================================================================================================================
t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles |
@ -83,10 +83,10 @@ Query OK, 4 row(s) in set (0.005399s)
## SQL Query Example
`meters.current` is the name of the supertable into which the data was inserted.
`meters_current` is the name of the supertable into which the data was inserted.
You can filter data by the supertable's tags. For example, to query `location=California.LosAngeles groupid=3`, use the following SQL:
```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3;
```

View File

@ -48,6 +48,7 @@ The OpenTSDB JSON format protocol uses a JSON string to represent one or more rows of data
- For the JSON protocol, TDengine does not automatically convert all tags to NCHAR type; strings are converted to NCHAR type and numeric values are converted to DOUBLE type.
- By default, the generated child table name is a unique ID produced by a rule. You can also configure the smlChildTableName parameter in taos.cfg on the client side to use a tag value as the child table name; the tag value must be globally unique. For example, if a tag is named tname and you configure smlChildTableName=tname, then inserting `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` creates the child table cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used when the table is automatically created, and the others are ignored
:::
## Sample Code
@ -82,8 +83,8 @@ Database changed.
taos> SHOW STABLES;
name |
=================================
meters.current |
meters.voltage |
meters_current |
meters_voltage |
Query OK, 2 row(s) in set (0.001954s)
taos> SELECT * FROM `meters.current`;
@ -96,10 +97,10 @@ Query OK, 2 row(s) in set (0.004076s)
## SQL Query Example
`meters.voltage` is the name of the supertable into which the data was inserted.
`meters_voltage` is the name of the supertable into which the data was inserted.
You can filter data by the supertable's tags. For example, to query `location=California.LosAngeles groupid=1`, use the following SQL:
```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3;
```

View File

@ -115,7 +115,19 @@ Set<String> subscription() throws SQLException;
ConsumerRecords<V> poll(Duration timeout) throws SQLException;
Set<TopicPartition> assignment() throws SQLException;
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
void close() throws SQLException;
```

View File

@ -223,7 +223,7 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
The base API is used to do things like create database connections and provide a runtime environment for the execution of other APIs.
- `void taos_init()`
- `int taos_init()`
Initializes the runtime environment. If this API is not called explicitly, the driver will call it automatically when `taos_connect()` is invoked, so programs generally do not need to call it manually.
@ -256,6 +256,12 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
:::
- `TAOS *taos_connect_auth(const char *host, const char *user, const char *auth, const char *db, uint16_t port)`
The function is the same as taos_connect, except that the pass parameter is replaced by auth; all other parameters are the same as those of taos_connect.
- auth: the 32-character lowercase MD5 hex digest of the raw password
- `char *taos_get_server_info(TAOS *taos)`
Gets server-side version information.
@ -272,6 +278,14 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
- If len is less than the space required to store the db name (including the terminating '\0'), an error is returned, and the value written to database is truncated and ends with '\0'.
- If len is greater than or equal to the required space (including the terminating '\0'), 0 is returned, and the db name, terminated with '\0', is written to database.
- `int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type)`
Sets the event callback function.
- fp: event callback function pointer. Declaration: `typedef void (*__taos_notify_fn_t)(void *param, void *ext, int type)`. param is a user-defined parameter, ext is an extended parameter (dependent on the event type; for TAOS_NOTIFY_PASSVER it carries the user password version), and type is the event type
- param: user-defined parameter
- type: event type. Value range: 1) TAOS_NOTIFY_PASSVER: user password changed
- `void taos_close(TAOS *taos)`
Closes the connection; `taos` is the handle returned by `taos_connect()`.
@ -396,21 +410,20 @@ All asynchronous TDengine APIs use a non-blocking calling model.
Parses an SQL statement and binds the parsed result and parameter information to stmt. If the length parameter is greater than 0, it is used as the length of the SQL statement; if it is 0, the length is determined automatically.
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind)`
- `int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind)`
Not as efficient as `taos_stmt_bind_param_batch()`, but can support non-INSERT SQL statements.
To bind parameters, bind points to an array (representing the row of data to be bound); the number and order of the elements in this array must exactly match the parameters in the SQL statement. TAOS_BIND is used similarly to MYSQL_BIND in MySQL and is defined as follows:
To bind parameters, bind points to an array (representing the row of data to be bound); the number and order of the elements in this array must exactly match the parameters in the SQL statement. TAOS_MULTI_BIND is used similarly to MYSQL_BIND in MySQL and is defined as follows:
```c
// Old definition (removed in this commit):
typedef struct TAOS_BIND {
  int         buffer_type;
  void       *buffer;
  uintptr_t   buffer_length;  // not in use
  uintptr_t  *length;
  int        *is_null;
  int         is_unsigned;    // not in use
  int        *error;          // not in use
} TAOS_BIND;

// New definition:
typedef struct TAOS_MULTI_BIND {
  int        buffer_type;
  void      *buffer;
  uintptr_t  buffer_length;
  uint32_t  *length;
  char      *is_null;
  int        num;             // the number of columns
} TAOS_MULTI_BIND;
```
- `int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name)`
@ -418,7 +431,7 @@ All asynchronous TDengine APIs use a non-blocking calling model.
(Added in version 2.1.1.0; only supported for replacing parameter values in INSERT statements)
When the table name in the SQL statement uses a `?` placeholder, this function can be used to bind a specific table name.
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags)`
- `int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_MULTI_BIND* tags)`
(Added in version 2.1.2.0; only supported for replacing parameter values in INSERT statements)
When both the table name and the TAGS in the SQL statement use `?` placeholders, this function can be used to bind a specific table name and specific TAGS values. The most typical usage scenario is an INSERT statement that uses automatic table creation (the current version does not support specifying particular TAGS columns). The number of columns in the TAGS parameter must exactly match the number of TAGS required by the SQL statement.
@ -428,17 +441,6 @@ All asynchronous TDengine APIs use a non-blocking calling model.
(Added in version 2.1.1.0; only supported for replacing parameter values in INSERT statements)
Passes the data to be bound in a column-wise manner; the order and number of the data columns passed here must exactly match the VALUES parameters in the SQL statement. TAOS_MULTI_BIND is defined as follows:
```c
typedef struct TAOS_MULTI_BIND {
int buffer_type;
void * buffer;
uintptr_t buffer_length;
uintptr_t * length;
char * is_null;
int num; // the number of columns
} TAOS_MULTI_BIND;
```
- `int taos_stmt_add_batch(TAOS_STMT *stmt)`
Adds the currently bound parameters to the batch. After calling this function, you can call `taos_stmt_bind_param()` or `taos_stmt_bind_param_batch()` again to bind new parameters. Note that this function only supports INSERT/IMPORT statements; for other SQL statements such as SELECT, an error is returned.
@ -447,6 +449,14 @@ All asynchronous TDengine APIs use a non-blocking calling model.
Executes the prepared statement. Currently, a statement can only be executed once.
- `int taos_stmt_affected_rows(TAOS_STMT *stmt)`
Gets the number of rows affected by executing the bound statement multiple times.
- `int taos_stmt_affected_rows_once(TAOS_STMT *stmt)`
Gets the number of rows affected by a single execution of the bound statement.
- `TAOS_RES* taos_stmt_use_result(TAOS_STMT *stmt)`
Gets the result set of the statement. The result set is used in the same way as in a non-parameterized call; when finished, call `taos_free_result()` on it to release resources.
@ -467,21 +477,22 @@ All asynchronous TDengine APIs use a non-blocking calling model.
- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)`
**Function description**
This interface writes line-protocol text data into TDengine.
- This interface writes line-protocol text data into TDengine.
**Parameter description**
taos: database connection, established by the `taos_connect()` function.
lines: text data. Schemaless text strings meeting the parsing format requirements.
numLines: number of lines of text data; cannot be 0.
protocol: line protocol type, used to identify the text data format.
precision: precision string for the timestamps in the text data.
- taos: database connection, established by the `taos_connect()` function.
- lines: text data. Schemaless text strings meeting the parsing format requirements.
- numLines: number of lines of text data; cannot be 0.
- protocol: line protocol type, used to identify the text data format.
- precision: precision string for the timestamps in the text data.
**Return value**
A TAOS_RES structure; the application can obtain the error message with `taos_errstr()` and the error code with `taos_errno()`.
- A TAOS_RES structure; the application can obtain the error message with `taos_errstr()` and the error code with `taos_errno()`.
In some cases the returned TAOS_RES is `NULL`; even then it is still safe to call `taos_errno()` to obtain the error code.
The returned TAOS_RES must be freed by the caller; otherwise a memory leak will occur.
**Description**
The protocol type is an enumeration containing the following three formats:
- TSDB_SML_LINE_PROTOCOL: InfluxDB Line Protocol
@ -515,3 +526,90 @@ All asynchronous TDengine APIs use a non-blocking calling model.
- The _raw interfaces pass the data through the lines pointer and the length len, which solves the problem of data containing '\0' being truncated by the original interfaces. The totalRows pointer returns the number of parsed data rows.
- The _ttl interfaces accept a ttl parameter to control the TTL expiration time of the created table.
- The _reqid interfaces can trace the entire call chain by passing a reqid parameter.
### Data Subscription API
- `int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment)`
- `void tmq_free_assignment(tmq_topic_assignment* pAssignment)`
The tmq_topic_assignment structure is defined as follows:
```c
typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
} tmq_topic_assignment;
```
**Function description**
- tmq_get_topic_assignment returns the information of the vgroups assigned to the current consumer; for each vgroup this includes the vgId, the maximum and minimum offsets of the WAL, and the offset currently consumed.
**Parameter description**
- numOfAssignment: the number of valid vgroups assigned to this consumer.
- assignment: the assignment information, an array of size numOfAssignment; it must be released with tmq_free_assignment.
**Return value**
- An error code: 0 on success, non-zero on failure; the error message can be obtained with `char *tmq_err2str(int32_t code)`.
- `int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
**Function description**
- Gets the commit position of the current consumer for a given topic and vgroup.
**Return value**
- The currently committed position; -2147467247 means there is no consumption progress, and other values less than 0 indicate failure, in which case the return value is the error code
- `int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg)`
- `void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param)`
- `int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
- `void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param)`
**Function description**
- The commit interfaces come in two types, each with synchronous and asynchronous variants:
- The first type commits based on a message, committing the progress contained in the message; if NULL is passed for the message, the current progress of all vgroups consumed by this consumer is committed: tmq_commit_sync/tmq_commit_async
- The second type commits based on the offset of one vgroup of one topic: tmq_commit_offset_sync/tmq_commit_offset_async
**Parameter description**
- msg: the consumed message; if NULL is passed, the current progress of all vgroups consumed by this consumer is committed
**Return value**
- An error code: 0 on success, non-zero on failure; the error message can be obtained with `char *tmq_err2str(int32_t code)`
- `int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
**Function description**
- Gets the current consumption position, which is the position right after the data already consumed
**Return value**
- The consumption position; values less than 0 indicate failure, and the error message can be obtained with `char *tmq_err2str(int32_t code)`
- `int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
**Function description**
- Sets the offset in one vgroup of a certain topic at which the consumer starts consuming
**Return value**
- An error code: 0 on success, non-zero on failure; the error message can be obtained with `char *tmq_err2str(int32_t code)`
- `int64_t tmq_get_vgroup_offset(TAOS_RES* res)`
**Function description**
- Gets the starting offset of the data consumed by poll
**Parameter description**
- res: the consumed message
**Return value**
- The starting offset of the consumed data; values less than 0 indicate failure, and the error message can be obtained with `char *tmq_err2str(int32_t code)`
- `int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics)`
**Function description**
- Gets the list of topics subscribed by the consumer
**Parameter description**
- topics: the retrieved topic list is stored in this structure; memory is allocated inside the interface and must be released with tmq_list_destroy
**Return value**
- An error code: 0 on success, non-zero on failure; the error message can be obtained with `char *tmq_err2str(int32_t code)`
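A sketch that reports the committed offset against the current position of one vgroup (the helper name is hypothetical and `tmq` is assumed to be an active consumer):

```c
#include <inttypes.h>
#include <stdio.h>
#include "taos.h"

// Hypothetical helper: print committed offset vs. current position for one vgroup.
static void report_progress(tmq_t *tmq, const char *topic, int32_t vgId) {
  int64_t committed = tmq_committed(tmq, topic, vgId);
  int64_t position  = tmq_position(tmq, topic, vgId);

  if (committed == -2147467247) {            // no progress committed yet
    printf("vgId %d: nothing committed yet\n", vgId);
  } else if (committed < 0) {
    printf("vgId %d: query failed: %s\n", vgId, tmq_err2str((int32_t)committed));
  } else {
    printf("vgId %d: committed=%" PRId64 ", next position=%" PRId64 "\n",
           vgId, committed, position);
  }
}
```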

View File

@ -36,6 +36,7 @@ REST connections are supported on all platforms that can run Java.
| taos-jdbcdriver version | major changes | TDengine version |
| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
| 3.2.5 | Subscription: added the committed() and assignment() methods | 3.1.0.3 and later |
| 3.2.4 | Subscription: added the enable.auto.commit parameter and the unsubscribe() method for WebSocket connections | - |
| 3.2.3 | Fixed ResultSet data parsing failures in some cases | - |
| 3.2.2 | New feature: subscription supports seek | 3.0.5.0 and later |
@ -1022,14 +1023,19 @@ while(true) {
#### Specify Subscription Offset
```java
// get the subscribed TopicPartitions
Set<TopicPartition> assignment() throws SQLException;
// get offsets
long position(TopicPartition partition) throws SQLException;
Map<TopicPartition, Long> position(String topic) throws SQLException;
Map<TopicPartition, Long> beginningOffsets(String topic) throws SQLException;
Map<TopicPartition, Long> endOffsets(String topic) throws SQLException;
Map<TopicPartition, OffsetAndMetadata> committed(Set<TopicPartition> partitions) throws SQLException;
// specify the offset to use in the next poll
void seek(TopicPartition partition, long offset) throws SQLException;
void seekToBeginning(Collection<TopicPartition> partitions) throws SQLException;
void seekToEnd(Collection<TopicPartition> partitions) throws SQLException;
```
Example:
@ -1055,6 +1061,18 @@ try (TaosConsumer<ResultBean> consumer = new TaosConsumer<>(properties)) {
}
```
#### Committing Offsets
When `enable.auto.commit` is false, offsets can be committed manually.
```java
void commitSync() throws SQLException;
void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets) throws SQLException;
// asynchronous commit is only available over native connections
void commitAsync(OffsetCommitCallback<V> callback) throws SQLException;
void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback<V> callback) throws SQLException;
```
#### Closing the Subscription
```java
@ -29,6 +29,10 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
The supported platforms are the same as those supported by the TDengine client driver.
:::note
Note that TDengine no longer supports 32-bit Windows.
:::
## Version Support
See the [version support list](../#版本支持).
@ -143,6 +143,7 @@ phpize && ./configure --enable-swoole && make -j && make install
| `TDengine\TSDB_DATA_TYPE_FLOAT` | float |
| `TDengine\TSDB_DATA_TYPE_DOUBLE` | double |
| `TDengine\TSDB_DATA_TYPE_BINARY` | binary |
| `TDengine\TSDB_DATA_TYPE_VARBINARY` | varbinary |
| `TDengine\TSDB_DATA_TYPE_TIMESTAMP` | timestamp |
| `TDengine\TSDB_DATA_TYPE_NCHAR` | nchar |
| `TDengine\TSDB_DATA_TYPE_UTINYINT` | utinyint |
docs/zh/12-taos-sql/06-select.md Normal file → Executable file
@ -7,9 +7,9 @@ description: detailed syntax for querying data
## Query Syntax
```sql
SELECT {DATABASE() | CLIENT_VERSION() | SERVER_VERSION() | SERVER_STATUS() | NOW() | TODAY() | TIMEZONE() | CURRENT_USER() | USER() }
SELECT [hints] [DISTINCT] [TAGS] select_list
from_clause
[WHERE condition]
[partition_by_clause]
@ -21,6 +21,11 @@ SELECT [DISTINCT] select_list
[LIMIT limit_val [OFFSET offset_val]]
[>> export_file]
hints: /*+ [hint([hint_param_list])] [hint([hint_param_list])] */
hint:
BATCH_SCAN | NO_BATCH_SCAN
select_list:
select_expr [, select_expr] ...
@ -70,6 +75,29 @@ order_expr:
{expr | position | c_alias} [DESC | ASC] [NULLS FIRST | NULLS LAST]
```
## Hints
Hints let the user influence the optimization of an individual query; when a hint does not apply to the current statement it is ignored automatically. Specifics:
- Hint syntax starts with `/*+` and ends with `*/`; whitespace is allowed before and after.
- Hint syntax may only follow the SELECT keyword.
- Each hint clause may contain several hints separated by spaces; when hints conflict or repeat, the first occurrence wins.
- If one hint in a clause is invalid, the valid hints before it still take effect, while the invalid hint and those after it are ignored.
- hint_param_list holds the parameters of each hint and differs per hint.
The currently supported hints are listed below:
| **Hint**      | **Parameters** | **Description**          | **Scope**                  |
| :-----------: | -------------- | ------------------------ | -------------------------- |
| BATCH_SCAN    | none           | scan tables in batches   | supertable JOIN statements |
| NO_BATCH_SCAN | none           | scan tables sequentially | supertable JOIN statements |
Example:
```sql
SELECT /*+ BATCH_SCAN() */ a.ts FROM stable1 a, stable2 b where a.tag0 = b.tag0 and a.ts = b.ts;
```
## Lists
A query can select some or all columns for the result set; both data columns and tag columns may appear in the select list.
@ -132,6 +160,16 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
:::
### Tag Queries
When only tag columns are selected, the `TAGS` keyword returns the tag columns of all child tables, with exactly one row of tag values per child table.
Return the tag columns of all child tables:
```sql
SELECT TAGS tag_name [, tag_name ...] FROM stb_name
```
### Result Set Column Names
In the `SELECT` clause, if no names are given for the result columns, the expression text in the `SELECT` clause is used as the column name by default. Result columns can also be renamed with `AS`. For example:
@ -167,7 +205,7 @@ SELECT table_name, tag_name, tag_type, tag_value FROM information_schema.ins_tag
SELECT COUNT(*) FROM (SELECT DISTINCT TBNAME FROM meters);
```
Both of the above queries only support filter conditions on tag columns (TAGS) in the WHERE clause.
**\_QSTART/\_QEND**
@ -209,8 +247,7 @@ TDengine supports INNER JOIN based on the timestamp primary key; the rules are:
3. For supertables, besides the equality condition on the timestamp primary key, the ON clause must also include equality conditions on tag columns that match one to one; OR conditions are not supported.
4. Tables taking part in a JOIN must all be of the same kind: all supertables, all child tables, or all regular tables.
5. Both sides of a JOIN support subqueries.
6. Mixing JOIN with the FILL clause is not supported.
## GROUP BY
@ -301,6 +338,12 @@ SELECT TODAY();
SELECT TIMEZONE();
```
### Obtaining the Current User
```sql
SELECT CURRENT_USER();
```
## Regular Expression Filtering
### Syntax
@ -354,7 +397,7 @@ SELECT AVG(CASE WHEN voltage < 200 or voltage > 250 THEN 220 ELSE voltage END) F
## JOIN Clause
TDengine supports inner joins based on the timestamp primary key, meaning the JOIN condition must include the timestamp primary key. As long as this requirement is met, regular tables, child tables, supertables, and subqueries can be inner-joined freely, with no limit on the number of tables; any other join conditions must be combined with the primary-key condition using AND.
JOIN between regular tables:
@ -1266,6 +1266,14 @@ SELECT SERVER_STATUS();
**Description**: checks whether every dnode on the server side is online; if so, success is returned, otherwise an error stating that the connection cannot be established.
### CURRENT_USER
```sql
SELECT CURRENT_USER();
```
**Description**: returns the current user.
## Geometry Functions
docs/zh/12-taos-sql/12-distinguished.md Normal file → Executable file
@ -31,7 +31,7 @@ select max(current) from meters partition by location interval(10m)
## Windowed Queries
TDengine supports aggregate queries over time-window slices. For example, when a temperature sensor samples once per second but the average temperature over each 10-minute span is needed, a window clause produces the desired result. A window clause splits the queried data set into window subsets and aggregates each one. There are four window types: time window, status window, session window, and event window; time windows are further divided into sliding and tumbling time windows.
The window clause syntax is:
@ -249,3 +249,12 @@ T = latest event time - DELETE_MARK
- [unique](../function/#unique)
- [mode](../function/#mode)
## Pausing and Resuming Stream Processing
1. Pause a stream-processing task:
PAUSE STREAM [IF EXISTS] stream_name;
Without IF EXISTS, an error is reported if the stream does not exist, and the stream is paused if it does. With IF EXISTS, success is returned if the stream does not exist, and the stream is paused if it does.
2. Resume a stream-processing task:
RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
Without IF EXISTS, an error is reported if the stream does not exist, and the stream is resumed if it does. With IF EXISTS, success is returned if the stream does not exist, and the stream is resumed if it does. If IGNORE UNTREATED is specified, data written while the stream was paused is ignored when it resumes.
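Since these are ordinary SQL statements, they can also be issued through any connector. A hedged C sketch follows; the stream name `s1` is hypothetical and `conn` is assumed to be an open connection.

```c
// Pause a stream, then resume it discarding data written while it was paused.
static void run_sql(TAOS *conn, const char *sql) {
  TAOS_RES *res = taos_query(conn, sql);
  if (taos_errno(res) != 0) {
    printf("'%s' failed: %s\n", sql, taos_errstr(res));
  }
  taos_free_result(res);
}

// usage:
//   run_sql(conn, "PAUSE STREAM IF EXISTS s1;");
//   run_sql(conn, "RESUME STREAM IF EXISTS IGNORE UNTREATED s1;");
```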
@ -178,7 +178,7 @@ description: the detailed list of TDengine reserved keywords
- MATCH
- MAX_DELAY
- BWLIMIT
- MAXROWS
- MERGE
- META
@ -22,6 +22,14 @@ SHOW CLUSTER;
Shows information about the current cluster.
## SHOW CLUSTER ALIVE
```sql
SHOW CLUSTER ALIVE;
```
Queries whether the current cluster is available. Return values: 0: unavailable; 1: fully available; 2: partially available (some nodes in the cluster are offline, but the remaining nodes can still be used normally).
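For monitoring scripts, the status can also be read programmatically. The sketch below assumes an open C connection `conn` and that the status comes back as a single INT column; both are assumptions to verify against your deployment.

```c
// Query cluster availability: 0 unavailable, 1 fully, 2 partially available.
TAOS_RES *res = taos_query(conn, "SHOW CLUSTER ALIVE;");
if (taos_errno(res) == 0) {
  TAOS_ROW row = taos_fetch_row(res);
  if (row != NULL && row[0] != NULL) {
    printf("cluster alive status: %d\n", *(int32_t *)row[0]);
  }
}
taos_free_result(res);
```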
## SHOW CONNECTIONS
```sql
@ -1,138 +0,0 @@
---
sidebar_label: Access Control
title: Access Control
description: access-control features available only in the enterprise edition
---
This section describes how to perform access-control operations in TDengine.
## Creating Users
```sql
CREATE USER use_name PASS 'password' [SYSINFO {1|0}];
```
Creates a user.
use_name may be at most 23 bytes long.
password may be at most 31 bytes long. Valid characters are "a-zA-Z0-9!?$%^&*()_+={[}]:;@~#|<,>.?/"; single quotes, double quotes, apostrophes, backslashes, and spaces are not allowed, and the password must not be empty.
SYSINFO controls whether the user may view system information: 1 means allowed, 0 means not allowed. System information includes server configuration, information about server nodes such as DNODE and QNODE, storage-related information, and so on. The default is to allow viewing.
For example, create a user test with password 123456 who can view system information:
```sql
taos> create user test pass '123456' sysinfo 1;
Query OK, 0 of 0 rows affected (0.001254s)
```
## Viewing Users
```sql
SHOW USERS;
```
Shows user information.
```sql
taos> show users;
name | super | enable | sysinfo | create_time |
================================================================================
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
Query OK, 2 rows in database (0.001657s)
```
User information can also be obtained from the INFORMATION_SCHEMA.INS_USERS system table, for example:
```sql
taos> select * from information_schema.ins_users;
name | super | enable | sysinfo | create_time |
================================================================================
test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
Query OK, 2 rows in database (0.001953s)
```
## Dropping Users
```sql
DROP USER user_name;
```
## Modifying User Information
```sql
ALTER USER user_name alter_user_clause
alter_user_clause: {
PASS 'literal'
| ENABLE value
| SYSINFO value
}
```
- PASS修改用户密码。
- ENABLE修改用户是否启用。1 表示启用此用户0 表示禁用此用户。
- SYSINFO修改用户是否可查看系统信息。1 表示可以查看系统信息0 表示不可以查看系统信息。
例如,禁用 test 用户:
```sql
taos> alter user test enable 0;
Query OK, 0 of 0 rows affected (0.001160s)
```
## Granting Privileges
```sql
GRANT privileges ON priv_level TO user_name
privileges : {
ALL
| priv_type [, priv_type] ...
}
priv_type : {
READ
| WRITE
}
priv_level : {
dbname.*
| *.*
}
```
Grants privileges to a user. Privilege management is available only in the enterprise edition.
Privileges are granted down to the DATABASE level, and there are two kinds: READ and WRITE.
TDengine has two classes of users: superusers and regular users. The superuser, created by default as root, has all privileges; users created by the superuser are regular users. Without any grants, a regular user can create databases and holds all privileges on the databases it creates, including dropping and altering them and querying and writing time-series data. A superuser can grant a regular user read and write privileges on other databases, letting that user read and write data there but not drop or alter those databases.
For non-DATABASE objects such as USER, DNODE, UDF, and QNODE, regular users have read-only access (generally the SHOW commands) and cannot create or modify them.
## Revoking Privileges
```sql
REVOKE privileges ON priv_level FROM user_name
privileges : {
ALL
| priv_type [, priv_type] ...
}
priv_type : {
READ
| WRITE
}
priv_level : {
dbname.*
| *.*
}
```
Revokes a user's privileges. Privilege management is available only in the enterprise edition.
@ -20,6 +20,9 @@ index_option:
functions:
function [, function] ...
```
### Tag Indexes
[Tag indexes](../tag-index)
### SMA Indexes
@ -17,7 +17,7 @@ conn_id can be obtained with `SHOW CONNECTIONS`.
## Terminating Queries
```sql
KILL QUERY 'kill_id';
```
kill_id can be obtained with `SHOW QUERIES`.
@ -13,7 +13,7 @@ taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine products
taosBenchmark can be installed in two ways:
- taosBenchmark is installed automatically together with the official TDengine installation package; see [TDengine installation](../../operation/pkg-install) for details.
- Build and install taos-tools separately; see the [taos-tools](https://github.com/taosdata/taos-tools) repository for details.
@ -105,6 +105,8 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-L, --loose-mode Using loose mode if the table name and column name
use letter and number only. Default is NOT.
-n, --no-escape No escape char '`'. Default is using it.
-Q, --dot-replace Replace dot character with underline character in
the table name.
-T, --thread-num=THREAD_NUM Number of thread for dump in file. Default is
8.
-C, --cloud=CLOUD_DSN specify a DSN to access TDengine cloud service
@ -89,3 +89,11 @@ taos -h h1.taos.com -s "use db; show tables;"
- Run `RESET QUERY CACHE` to clear the local table-schema cache
- Execute SQL statements in batch: place a series of TDengine CLI commands (each SQL statement ending with a semicolon, one statement per line) in a file, then run `source <file-name>` in the TDengine CLI to execute every SQL statement in that file automatically
- Type `q`, `quit`, or `exit` and press Enter to leave the TDengine CLI
## Exporting Query Results to a File from the TDengine CLI
- Query results can be exported to a file with the symbol ">>". The syntax is: sql-statement >> 'output-file'; if the file name carries no path, the file is written to the current directory. For example, select * from d0 >> '/root/d0.csv'; writes the query result to /root/d0.csv.
## Importing Data from a File into a Table from the TDengine CLI
- Use insert into table_name file 'input-file' to import the data exported above back into a table. For example, insert into d0 file '/root/d0.csv'; imports all of the exported data back into table d0.
@ -33,41 +33,6 @@ All values in tag_set are automatically converted to the nchar data type and do not require
In the schemaless line protocol, every item in field_set must describe its own data type. Specifically:
- Values wrapped in double quotation marks are of type BINARY(32). For example `"abc"`.
---
title: Schemaless Writing
description: 'Schemaless writing removes the need to create supertables and subtables in advance; the storage structures matching the data are created automatically as it is written'
---
In IoT applications, many data items are typically collected to support intelligent control, business analytics, device monitoring, and so on. Upgrades to the application logic or adjustments to the device hardware may change the set of collected items rather frequently. To keep data recording simple in such situations, TDengine provides schemaless writing, which removes the need to create supertables and subtables in advance: the matching storage structures are created automatically as the data is written. When necessary, schemaless writing also adds required data columns so that user data is always stored correctly.
Supertables and subtables created through schemaless writing are identical to those created directly with SQL, and data can also be written into them directly with SQL statements. Note that table names created through schemaless writing are generated from tag values following fixed mapping rules, so they carry no obvious meaning and lack readability.
Note: schemaless writing creates tables automatically; creating tables manually is not needed and may lead to unknown errors.
## Schemaless Write Line Protocols
TDengine's schemaless writing is compatible with the InfluxDB Line Protocol, the OpenTSDB telnet line protocol, and the OpenTSDB JSON protocol, but when using these three protocols, the parsing standard of the input must be specified in the API call.
For the standard InfluxDB and OpenTSDB write protocols, see their own documentation. Below, starting from the InfluxDB line protocol, we introduce the extensions TDengine adds that give users finer control over the (supertable) schema.
Schemaless writing expresses one data row as a single string (several row strings can be passed to one write API call for batch writing), with the following format:
```json
measurement,tag_set field_set timestamp
```
where:
- measurement is the table name, separated from tag_set by a comma.
- tag_set carries the tag data in the form `<tag_key>=<tag_value>,<tag_key>=<tag_value>`, i.e. multiple tags separated by commas. It is separated from field_set by a space.
- field_set carries the regular column data in the form `<field_key>=<field_value>,<field_key>=<field_value>`, again comma-separated. It is separated from timestamp by a space.
- timestamp is the primary-key timestamp of this row.
All values in tag_set are automatically converted to the nchar data type and do not require double quotation marks (").
In the schemaless line protocol, every item in field_set must describe its own data type. Specifically:
- Values wrapped in double quotation marks are of type VARCHAR. For example `"abc"`.
- Values wrapped in double quotation marks with an L or l prefix are of type NCHAR. For example `L"报错信息"`.
- Values wrapped in double quotation marks with a G or g prefix are of type GEOMETRY. For example `G"Point(4.343 89.342)"`.
@ -126,166 +91,6 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```
:::tip
Note that tag_key1, tag_key2 here are not in the original order entered by the user; the tag names are sorted in ascending string order, so tag_key1 is not necessarily the first tag in the line protocol.
After sorting, the MD5 hash "md5_val" of this string is computed, and the table name is formed as "t_md5_val", where the fixed prefix "t_" marks every table generated through this mapping.
:::tip
To let users choose the generated table name, the smlChildTableName parameter can be set in taos.cfg.
For example, with smlChildTableName=tname, inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates a table named cpu1. Note that if several rows share the same tname but carry different tag_set values, the tag_set of the first row that auto-created the table is used and the others are ignored.
2. If the supertable derived from the line protocol does not exist, it is created (creating supertables manually is not recommended, as it may cause anomalies when inserting data).
3. If the subtable derived from the line protocol does not exist, Schemaless creates it using the subtable name determined in step 1 or 2.
4. If a tag column or regular column named in the row does not exist, it is added to the supertable (columns are only ever added, never removed).
5. If some tag or regular columns of the supertable are not given a value in a data row, their values are set to
NULL in that row.
6. For BINARY or NCHAR columns, if a value in a row exceeds the column's length limit, the maximum length the column can store is increased automatically (it only grows, never shrinks) to keep the data intact.
7. Any error encountered during processing aborts the write and returns an error code.
8. For write efficiency, it is assumed by default that the field_set order is identical within one supertable (the first row contains all fields and later rows follow that order). If the order differs, the smlDataFormat parameter must be set to false; otherwise data is written assuming identical order and the stored data will be wrong. Starting with 3.0.3.0, order consistency is detected automatically and this option is obsolete.
9. Because table names created with SQL cannot contain the dot character (.), schemaless writing also handles dots: if an automatically created table name contains a dot (.), it is replaced with an underscore (\_). If a subtable name is specified manually and contains a dot (.), it is likewise converted to an underscore (\_).
10. taos.cfg gained the smlTsDefaultName option (a string, effective only on the client side); when set, it names the timestamp column of schemaless auto-created tables. If unset, the default is _ts.
11. Supertable and subtable names in schemaless writing are case sensitive.
:::tip
All schemaless processing still follows TDengine's underlying limits on data structures, e.g. the total length of a row may not exceed
48 KB (64 KB since version 3.0.5.0) and the total length of tag values may not exceed 16 KB. See [TDengine SQL limits](/taos-sql/limit) for these constraints.
:::
## Timestamp Resolution
Schemaless writing supports the following three protocol modes:
| **No.** | **Value**           | **Description**             |
| ------- | ------------------- | --------------------------- |
| 1       | SML_LINE_PROTOCOL   | InfluxDB Line Protocol      |
| 2       | SML_TELNET_PROTOCOL | OpenTSDB text line protocol |
| 3       | SML_JSON_PROTOCOL   | JSON protocol format        |
In SML_LINE_PROTOCOL mode, the user must specify the resolution of the input timestamps. The available resolutions are:
| **No.** | **Resolution constant**           | **Meaning**         |
| ------- | --------------------------------- | ------------------- |
| 1       | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | undefined (invalid) |
| 2       | TSDB_SML_TIMESTAMP_HOURS          | hours               |
| 3       | TSDB_SML_TIMESTAMP_MINUTES        | minutes             |
| 4       | TSDB_SML_TIMESTAMP_SECONDS        | seconds             |
| 5       | TSDB_SML_TIMESTAMP_MILLI_SECONDS  | milliseconds        |
| 6       | TSDB_SML_TIMESTAMP_MICRO_SECONDS  | microseconds        |
| 7       | TSDB_SML_TIMESTAMP_NANO_SECONDS   | nanoseconds         |
In SML_TELNET_PROTOCOL and SML_JSON_PROTOCOL modes, the precision is determined from the length of the timestamp (the same behavior as standard OpenTSDB), and any user-specified resolution is ignored.
## Data Schema Mapping Rules
This section explains how InfluxDB Line Protocol data maps onto a schema. The measurement of each line-protocol row maps to the supertable name, the tag names in tag_set map to the tag names of the schema, and the names in field_set map to column names. Using this data as an example:
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
```
This row maps to a supertable st with three nchar tags t1, t2, t3 and five columns: ts (timestamp), c1 (bigint), c3 (binary), c2 (bool), c4 (bigint). It is equivalent to the following SQL statement:
```json
create stable st (_ts timestamp, c1 bigint, c2 bool, c3 binary(6), c4 bigint) tags(t1 nchar(1), t2 nchar(1), t3 nchar(2))
```
## Handling Schema Changes
This section describes how different row inputs affect the schema.
If a field written with an explicitly declared type is later written with a different type definition, a definite schema error occurs and the write API reports it. As shown below,
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000
```
The first row maps column c4 to Double, but the second row declares it as BigInt through the numeric suffix, which triggers a schemaless parsing error.
If an earlier line-protocol row declared a column as binary and a later row needs a longer binary value, the supertable schema is updated:
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000
st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000
```
The first row declares c5 as a binary(4) field; the second row still writes c5 as binary but with width 6, so the binary width is increased until it can hold the new string.
```json
st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000
st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
```
The second row adds a column c6 of type binary(6) relative to the first row, so a column c6 of type binary(6) is added automatically.
## Write Integrity
TDengine guarantees idempotency of writes, so the API can be called repeatedly to rewrite data that failed. However, atomicity across multiple rows in one batch is not guaranteed: some rows in a batch may be written successfully while others fail.
## Error Codes
When the data itself is invalid during schemaless writing, the application receives the TSDB_CODE_TSC_LINE_SYNTAX_ERROR
error, indicating that the error lies in the written text. All other error codes match the existing system, and the specific
cause can be obtained with taos_errstr.
- Values wrapped in double quotation marks are of type BINARY(32). For example `"abc"`.
- Values wrapped in double quotation marks with an L or l prefix are of type NCHAR. For example `L"报错信息"`.
- Values wrapped in double quotation marks with a G or g prefix are of type Geometry. For example `G"Point(4.343 89.342)"`.
- Values wrapped in double quotation marks with a B or b prefix are of type VARBINARY.
- Spaces, equals signs (=), commas (,), double quotes ("), and backslashes (\) must be escaped with a preceding backslash (\) (all of these are ASCII characters). The escaping rules are:
| **No.** | **Element**     | **Characters to escape**  |
| ------- | --------------- | ------------------------- |
| 1       | supertable name | comma, space              |
| 2       | tag name        | comma, equals sign, space |
| 3       | tag value       | comma, equals sign, space |
| 4       | column name     | comma, equals sign, space |
| 5       | column value    | double quote, backslash   |
With two consecutive backslashes, the first acts as the escape character; a single backslash needs no escaping. Backslash escaping examples:
| **No.** | **Backslashes** | **Escaped to** |
| -------- | ----------- | ----------------------------- |
| 1 | \ | \ |
| 2 | \\\\ | \ |
| 3 | \\\\\\ | \\\\ |
| 4 | \\\\\\\\ | \\\\ |
| 5 | \\\\\\\\\\ | \\\\\\ |
| 6 | \\\\\\\\\\\\ | \\\\\\ |
- Numeric types are distinguished by suffix:
| **No.** | **Suffix** | **Mapped type** | **Size (bytes)** |
| -------- | ----------- | ----------------------------- | -------------- |
| 1 | none or f64 | double | 8 |
| 2 | f32 | float | 4 |
| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
| 4 | i16/u16 | SmallInt/USmallInt | 2 |
| 5 | i32/u32 | Int/UInt | 4 |
| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
- t, T, true, True, TRUE, f, F, false, False are treated directly as BOOL values.
For example, the following row writes, into the subtable of supertable st whose t1 tag is "3" (NCHAR), t2 tag is "4" (NCHAR), and t3
tag is "t3" (NCHAR), one row with c1 = 3 (BIGINT), c2 = false (BOOL), c3
= "passit" (BINARY), c4 = 4 (DOUBLE), and primary-key timestamp 1626006833639000000.
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
```
Note that using the wrong letter case in a type suffix, or declaring an invalid type for a value, may cause an error message and make the write fail.
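For reference, here is a hedged C sketch of writing the line above with the schemaless API; `conn` is assumed to be an open connection, and the signature should be verified against the installed client header.

```c
// Sketch: insert the sample row through taos_schemaless_insert with
// nanosecond timestamps, letting TDengine create st and the subtable.
char *lines[] = {
    "st,t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit\",c2=false,c4=4f64 1626006833639000000"};
TAOS_RES *res = taos_schemaless_insert(conn, lines, 1, TSDB_SML_LINE_PROTOCOL,
                                       TSDB_SML_TIMESTAMP_NANO_SECONDS);
if (taos_errno(res) != 0) {
  printf("schemaless insert failed: %s\n", taos_errstr(res));
}
taos_free_result(res);
```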
## Main Processing Logic of Schemaless Writing
Schemaless writing processes row data according to the following rules:
1. The subtable name is generated as follows: first, the measurement name and the tag keys and values are combined into the string
```json
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```
:::tip
Note that tag_key1, tag_key2 here are not in the original order entered by the user; the tag names are sorted in ascending string order, so tag_key1 is not necessarily the first tag in the line protocol.
After sorting, the MD5 hash "md5_val" of the string is computed, and the table name is formed as "t_md5_val", where the fixed prefix "t_" marks every table generated through this mapping.
@ -16,7 +16,7 @@ taosKeeper is the tool that exports monitoring metrics for TDengine 3.0.
Ways to install taosKeeper:
- taosKeeper is installed automatically together with the official TDengine installation package; see [TDengine installation](../../operation/pkg-install) for details.
- Build and install taosKeeper separately; see the [taosKeeper](https://github.com/taosdata/taoskeeper) repository for details.
@ -0,0 +1,80 @@
---
title: Cluster Maintenance
description: TDengine offers several maintenance tools that keep a cluster healthier and more efficient
---
To keep clusters running healthier and more efficiently, TDengine Enterprise provides several maintenance features that help system administrators operate their clusters better.
## Data Compaction
TDengine serves many write scenarios, and in some of them its storage can amplify the stored data or leave holes in data files, which hurts both storage efficiency and query efficiency. To address this, TDengine Enterprise provides a data compaction feature, DATA COMPACT, which reorganizes the stored data files, removes file holes and invalid data, and improves data organization, thereby raising both storage and query efficiency.
**Syntax**
```sql
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY']
```
**Effects**
- Scans and compacts all data files of every VNODE in every VGROUP of the given database
- COMPACT removes deleted data and the data of dropped tables
- COMPACT merges multiple STT files
- The start time of the data to compact can be specified with the start with keyword
- The end time of the data to compact can be specified with the end with keyword
**Additional notes**
- COMPACT is asynchronous: the COMPACT command returns without waiting for the compaction to finish. If a previous COMPACT has not finished when another COMPACT task is issued, the new command waits for the previous task to finish before returning.
- COMPACT may block writes, but it does not block queries.
- The progress of a COMPACT cannot be observed.
## Cluster Load Rebalancing
When one or more nodes of a multi-replica cluster restart after an upgrade or for other reasons, the load across the dnodes may become unbalanced; in the extreme case the leaders of all vgroups end up on the same dnode. To fix this, use the following command:
```sql
balance vgroup leader;
```
**Function**
Distributes the leaders of all vgroups evenly across their replica nodes. The command forces each vgroup to re-elect, changing the vgroup's leader during the election and thereby making the leader distribution even in the end.
**Note**
Raft elections are inherently random, so the redistribution produced by re-election is only probabilistically even, not perfectly even. **A side effect of this command is that it affects queries and writes**: while a vgroup re-elects, from the start of the election until a new leader is chosen, the vgroup cannot be written to or queried. Elections usually complete within seconds. All vgroups re-elect one by one, in sequence.
## Restoring a Data Node
In a multi-node cluster with three replicas, if the disk of a dnode is damaged, that dnode exits automatically, while the other dnodes in the cluster keep serving writes and queries.
After replacing the damaged disk, the `restore dnode` command brings the previously exited dnode back into service by restoring some or all of the logical nodes on it. The feature relies on data replication from the other replicas, so it only works when the cluster has at least 3 dnodes and the replica count is 3.
```sql
restore dnode <dnode_id> # restore the mnode, all vnodes, and the qnode on this dnode
restore mnode on dnode <dnode_id> # restore the mnode on this dnode
restore vnode on dnode <dnode_id> # restore all vnodes on this dnode
restore qnode on dnode <dnode_id> # restore the qnode on this dnode
```
**Limitations**
- This feature restores nodes through the existing replication mechanism; it is not disaster recovery or backup restoration. For an mnode or vnode to be restored, the other two replicas of that mnode or vnode must still be working normally.
- The command cannot repair individual damaged or missing files in the data directory. If individual files or data inside an mnode or vnode are corrupted, the corrupted file or data block cannot be restored on its own; instead, the data of that mnode/vnode can be wiped completely and then restored.
## Vgroup Splitting (Scale Out)
When a vgroup is overloaded on CPU or disk because it holds too many subtables, add a dnode and then use the `split vgroup` command to split the vgroup into two. After the split, the two new vgroups take over the read and write service previously provided by the single vgroup. This is also how TDengine gives enterprise users the ability to scale out a cluster.
```sql
split vgroup <vgroup_id>
```
**Notes**
- For single-replica databases, the total disk space used by historical time-series data may double after the split. Before running it, add dnodes so that the cluster has enough CPU and disk resources and resource shortages are avoided.
- The command runs as a DB-level transaction: while it executes, other management transactions on the same DB are rejected. Other DBs in the cluster are unaffected.
- Read and write service remains available during the split, though a brief, perceptible interruption may occur.
- Streams and subscriptions are not supported during the split; after the split finishes, the historical WAL is cleared.
- Node restart fault tolerance is supported during the split; node disk-failure tolerance is not.
@ -0,0 +1,56 @@
---
title: Multi-Level Storage
---
## Multi-Level Storage
Note: multi-level storage is only supported in the enterprise edition.
By default, TDengine stores all data under /var/lib/taos, with the data files of each vnode in a separate directory underneath. To enlarge the storage space, minimize file-read bottlenecks, and raise data throughput, TDengine lets multiple mounted disks be used at once via the dataDir system parameter.
In addition, TDengine offers tiered storage: data from different time ranges can be stored in directories on different mounted media, so that data of different "temperature" lives on different storage media, making full use of storage and saving cost. For example, the most recently collected data is accessed frequently and demands high read performance, so it can be kept on SSDs; data past a certain age is queried less and can be stored on relatively cheap HDDs.
Multi-level storage supports 3 tiers, with up to 16 mount points per tier.
Multi-level storage is configured as follows (in the configuration file /etc/taos/taos.cfg):
```
dataDir [path] <level> <primary>
```
- path: the directory path of the mount point.
- level: the storage tier of the medium, one of 0, 1, 2.
Tier 0 stores the newest data, tier 1 the next-newest, and tier 2 the oldest; if omitted, the default is 0.
Data flows between tiers as: tier 0 storage -> tier 1 storage -> tier 2 storage.
Several disks may be mounted at the same tier, and the data files of that tier are spread across all of its disks.
Note that the movement of data across storage media is performed automatically by the system; users do not need to intervene.
- primary: whether this is the primary mount point, 0 or 1; if omitted, the default is 1.
Exactly one primary mount point is allowed in the configuration (level=0, primary=1), for example configured as follows:
```
dataDir /mnt/data1 0 1
dataDir /mnt/data2 0 0
dataDir /mnt/data3 1 0
dataDir /mnt/data4 1 0
dataDir /mnt/data5 2 0
dataDir /mnt/data6 2 0
```
:::note
1. Multi-level configurations may not skip a tier: the valid layouts are tier 0 only, tier 0 + tier 1, and tier 0 + tier 1 + tier 2. Configuring level=0 and level=2 without level=1 is not allowed.
2. Do not remove a mounted disk manually while it is in use; network (non-local) disks are currently not supported as mount points.
3. Multi-level storage currently offers no way to remove a disk that has already been mounted.
:::
## Tier-0 Load Balancing
In multi-level storage there is exactly one primary mount point, which stores the most important metadata of the system, and the home directories of all vnodes on the dnode also reside on that mount point, so the write performance of the dnode is bounded by the IO throughput of a single disk.
Starting with TDengine 3.1.0.0, when a dnode is configured with multiple tier-0 mount points, the home directories of all vnodes on that dnode are distributed evenly across all tier-0 mount points, which then share the write load. When network I/O and other processing resources are not the bottleneck, tests with an optimized cluster configuration show that the system's overall write capacity scales linearly with the number of tier-0 mount points: as their number grows, the whole system's write capacity grows proportionally.
## Mount Point Selection Within a Tier
Normally, when TDengine picks a mount point within a tier for a new data file, it uses a round-robin policy. In practice, disks may differ in capacity, or have equal capacity but receive different amounts of data, so the free space across disks becomes uneven and a disk with very little space left may get selected. To solve this, version 3.1.1.0 introduced the `minDiskFreeSize` option: when the free space on a disk is at or below this threshold, that disk is no longer selected for new data files. The option's unit is bytes, and its value should be greater than 2GB, i.e. mount points with less than 2GB free are skipped.
@ -23,7 +23,7 @@ The TDengine Source Connector reads data from TDengine in real time and sends
1. A Linux operating system
2. Java 8 and Maven installed
3. Git, curl, and vi installed
4. TDengine installed and running. If it is not yet installed, see [Install and Uninstall](../../operation/pkg-install)
## Installing Kafka
docs/zh/20-third-party/70-seeq.md vendored Normal file
@ -0,0 +1,437 @@
---
sidebar_label: Seeq
title: Seeq
description: how to analyze time-series data with Seeq and TDengine
---
# How to Analyze Time-Series Data with Seeq and TDengine
## Solution Overview
Seeq is advanced analytics software for manufacturing and the Industrial Internet of Things (IIOT). Seeq supports innovative machine-learning features in process-manufacturing organizations. These features let organizations deploy their own or third-party machine-learning algorithms into the advanced analytics applications used by frontline process engineers and subject-matter experts, extending the effort of a single data scientist to many frontline workers.
Through the TDengine Java connector, Seeq can easily query the time-series data served by TDengine and provide data visualization, analytics, forecasting, and more.
### Installing Seeq
Download the relevant software, such as Seeq Server and Seeq Data Lab, from the [Seeq website](https://www.seeq.com/customer-download).
### Installing and Starting Seeq Server
```
tar xvzf seeq-server-xxx.tar.gz
cd seeq-server-installer
sudo ./install
sudo seeq service enable
sudo seeq start
```
### Installing and Starting Seeq Data Lab Server
Seeq Data Lab must be installed on a different server from Seeq Server and connected to it through configuration. See the [Seeq documentation](https://support.seeq.com/space/KB/1034059842) for detailed installation and configuration instructions.
```
tar xvf seeq-data-lab-<version>-64bit-linux.tar.gz
sudo seeq-data-lab-installer/install -f /opt/seeq/seeq-data-lab -g /var/opt/seeq -u seeq
sudo seeq config set Network/DataLab/Hostname localhost
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231)
sudo seeq config set Network/Hostname <value> # the host IP or URL of the main Seeq Server
# If the main Seeq server is configured to listen over HTTPS
sudo seeq config set Network/Webserver/SecurePort 443 # the secure port of the main Seeq Server (usually 443)
# If the main Seeq server is NOT configured to listen over HTTPS
sudo seeq config set Network/Webserver/Port <value>
#On the main Seeq server, open a Seeq Command Prompt and set the hostname of the Data Lab server:
sudo seeq config set Network/DataLab/Hostname <value> # the host IP (not URL) of the Data Lab server
sudo seeq config set Network/DataLab/Port 34231 # the port of the Data Lab server (usually 34231
```
## Installing a Local TDengine Instance
See the [official documentation](https://docs.taosdata.com/get-started/package/).
## Accessing TDengine Cloud
To connect Seeq to TDengine Cloud, request an account at https://cloud.taosdata.com and log in to see how to access TDengine Cloud.
## Configuring Seeq to Access TDengine
1. Check the data storage location
```
sudo seeq config get Folders/Data
```
2. Download the TDengine Java connector package from maven.org (the latest version is currently [3.2.4](https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.4/taos-jdbcdriver-3.2.4-dist.jar)) and copy it into plugins\lib under the data storage location.
3. Restart the Seeq server
```
sudo seeq restart
```
4. Enter the license
Open ip:34216 in a browser and enter the license as instructed.
## Analyzing TDengine Time-Series Data with Seeq
This chapter demonstrates how to use the Seeq software together with TDengine for time-series analysis.
### Scenario
The example models a power system: users collect electricity-usage data from station meters every day and store it in a TDengine cluster. The users now want to forecast how power consumption will develop and purchase more equipment to support it. Consumption varies with monthly orders, and, given seasonal changes, usage also varies over the year; since this city lies in the northern hemisphere, more electricity is used in summer. We simulate data to reflect these assumptions.
### Data Schema
```
CREATE STABLE meters (ts TIMESTAMP, num INT, temperature FLOAT, goods INT) TAGS (device NCHAR(20));
CREATE TABLE goods (ts1 TIMESTAMP, ts2 TIMESTAMP, goods FLOAT);
```
![Seeq demo schema](./seeq/seeq-demo-schema.webp)
### Generating the Data
```
python mockdata.py
taos -s "insert into power.goods select _wstart, _wstart + 10d, avg(goods) from power.meters interval(10d);"
```
The source code is hosted in a [GitHub repository](https://github.com/sangshuduo/td-forecasting).
### Analyzing the Data with Seeq
#### Configuring a Data Source
Log in with a Seeq administrator account and create a new data source.
- Power
```
{
"QueryDefinitions": [
{
"Name": "PowerNum",
"Type": "SIGNAL",
"Sql": "SELECT ts, num FROM meters",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Num",
"Sql": null,
"Uom": "string"
},
{
"Name": "Interpolation Method",
"Value": "linear",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Interpolation",
"Value": "2day",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": null
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
- Goods
```
{
"QueryDefinitions": [
{
"Name": "PowerGoods",
"Type": "CONDITION",
"Sql": "SELECT ts1, ts2, goods FROM power.goods",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Goods",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Duration",
"Value": "10days",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": [
{
"Name": "goods",
"Value": "${columnResult}",
"Column": "goods",
"Uom": "string"
}
]
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
- Temperature
```
{
"QueryDefinitions": [
{
"Name": "PowerNum",
"Type": "SIGNAL",
"Sql": "SELECT ts, temperature FROM meters",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Temperature",
"Sql": null,
"Uom": "string"
},
{
"Name": "Interpolation Method",
"Value": "linear",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Interpolation",
"Value": "2day",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": null
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://127.0.0.1:6041/power?user=root&password=taosdata",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
#### Using Seeq Workbench
Log in to the Seeq service page and create a new Seeq Workbench. By selecting data-source search results and choosing among the tools as needed, data can be displayed or forecast; see the [official knowledge base](https://support.seeq.com/space/KB/146440193/Seeq+Workbench) for detailed usage.
![Seeq Workbench](./seeq/seeq-demo-workbench.webp)
#### Further Analysis with Seeq Data Lab Server
Log in to the Seeq service page and create a new Seeq Data Lab, where Python programming and other machine-learning tools can be used for more advanced data-mining work.
```Python
from seeq import spy
spy.options.compatibility = 189
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import mlforecast
import lightgbm as lgb
from mlforecast.target_transforms import Differences
from sklearn.linear_model import LinearRegression
ds = spy.search({'ID': "8C91A9C7-B6C2-4E18-AAAF-XXXXXXXXX"})
print(ds)
sig = ds.loc[ds['Name'].isin(['Num'])]
print(sig)
data = spy.pull(sig, start='2015-01-01', end='2022-12-31', grid=None)
print("data.info()")
data.info()
print(data)
#data.plot()
print("data[Num].info()")
data['Num'].info()
da = data['Num'].index.tolist()
#print(da)
li = data['Num'].tolist()
#print(li)
data2 = pd.DataFrame()
data2['ds'] = da
print('1st data2 ds info()')
data2['ds'].info()
#data2['ds'] = pd.to_datetime(data2['ds']).to_timestamp()
data2['ds'] = pd.to_datetime(data2['ds']).astype('int64')
data2['y'] = li
print('2nd data2 ds info()')
data2['ds'].info()
print(data2)
data2.insert(0, column = "unique_id", value="unique_id")
print("Forecasting ...")
forecast = mlforecast.MLForecast(
models = lgb.LGBMRegressor(),
freq = 1,
lags=[365],
target_transforms=[Differences([365])],
)
forecast.fit(data2)
predicts = forecast.predict(365)
pd.concat([data2, predicts]).set_index("ds").plot(title = "current data with forecast")
plt.show()
```
The program produces this output:
![Seeq forecast result](./seeq/seeq-forecast-result.webp)
### Configuring a Seeq Data Source for TDengine Cloud
Configuring a Seeq data source for TDengine Cloud is essentially no different from connecting to a local TDengine instance: log in to TDengine Cloud, choose "Programming - Java", copy the JDBC URL containing the token string, and use it as the DatabaseJdbcUrl value of the Seeq data source.
Note that when using TDengine Cloud, the database name must be specified in SQL commands.
#### Example data-source configuration using TDengine Cloud:
```
{
"QueryDefinitions": [
{
"Name": "CloudVoltage",
"Type": "SIGNAL",
"Sql": "SELECT ts, voltage FROM test.meters",
"Enabled": true,
"TestMode": false,
"TestQueriesDuringSync": true,
"InProgressCapsulesEnabled": false,
"Variables": null,
"Properties": [
{
"Name": "Name",
"Value": "Voltage",
"Sql": null,
"Uom": "string"
},
{
"Name": "Interpolation Method",
"Value": "linear",
"Sql": null,
"Uom": "string"
},
{
"Name": "Maximum Interpolation",
"Value": "2day",
"Sql": null,
"Uom": "string"
}
],
"CapsuleProperties": null
}
],
"Type": "GENERIC",
"Hostname": null,
"Port": 0,
"DatabaseName": null,
"Username": "root",
"Password": "taosdata",
"InitialSql": null,
"TimeZone": null,
"PrintRows": false,
"UseWindowsAuth": false,
"SqlFetchBatchSize": 100000,
"UseSSL": false,
"JdbcProperties": null,
"GenericDatabaseConfig": {
"DatabaseJdbcUrl": "jdbc:TAOS-RS://gw.cloud.taosdata.com?useSSL=true&token=41ac9d61d641b6b334e8b76f45f5a8XXXXXXXXXX",
"SqlDriverClassName": "com.taosdata.jdbc.rs.RestfulDriver",
"ResolutionInNanoseconds": 1000,
"ZonedColumnTypes": []
}
}
```
#### Example Seeq Workbench with TDengine Cloud as the data source
![Seeq workbench with TDengine cloud](./seeq/seeq-workbench-with-tdengine-cloud.webp)
## Summary
By integrating Seeq and TDengine, users can exploit TDengine's efficient storage and query performance while also benefiting from the powerful data visualization and analysis that Seeq offers its users.
The integration lets users take full advantage of TDengine's high-performance time-series storage and retrieval to process large volumes of data efficiently, while Seeq contributes advanced analytics such as data visualization, anomaly detection, correlation analysis, and predictive modeling, giving users valuable insight for data-driven decisions.
Together, Seeq and TDengine provide a comprehensive time-series analytics solution for manufacturing, industrial IoT, power systems, and other industries. The combination of efficient data storage and advanced analytics empowers users to unlock the full potential of their time-series data, drive operational improvements, and support forecasting and planning applications.
@ -10,6 +10,14 @@ TDengine 2.x installation packages for each version are available [here](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
## 3.1.0.3
<Release type="tdengine" version="3.1.0.3" />
## 3.1.0.2
<Release type="tdengine" version="3.1.0.2" />
## 3.1.0.0
<Release type="tdengine" version="3.1.0.0" />
@ -133,6 +133,7 @@
<configuration>
<source>8</source>
<target>8</target>
<encoding>UTF-8</encoding>
</configuration>
</plugin>
@ -8,4 +8,4 @@ java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host <hostname> -data
```
If the error Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path occurs,
check whether the TDengine client installation package is installed or TDengine has been built and installed. If it is definitely installed and the error still appears, add -Djava.library.path=/usr/lib after java on the command line to specify the path for locating the shared library.
@ -32,12 +32,12 @@ static void queryDB(TAOS *taos, char *command) {
taos_free_result(pSql);
pSql = NULL;
}
pSql = taos_query(taos, command);
code = taos_errno(pSql);
if (0 == code) {
break;
}
}
}
if (code != 0) {
@ -63,7 +63,7 @@ int main(int argc, char *argv[]) {
TAOS *taos = taos_connect(argv[1], "root", "taosdata", NULL, 0);
if (taos == NULL) {
printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/);
printf("failed to connect to server, reason:%s\n", taos_errstr(NULL));
exit(1);
}
for (int i = 0; i < 100; i++) {
@ -86,14 +86,14 @@ void Test(TAOS *taos, char *qstr, int index) {
for (i = 0; i < 10; ++i) {
sprintf(qstr, "insert into m1 values (%" PRId64 ", %d, %d, %d, %d, %f, %lf, '%s')", (uint64_t)(1546300800000 + i * 1000), i, i, i, i*10000000, i*1.0, i*2.0, "hello");
printf("qstr: %s\n", qstr);
// note: how do you wanna do if taos_query returns non-NULL
// if (taos_query(taos, qstr)) {
// printf("insert row: %i, reason:%s\n", i, taos_errstr(taos));
// }
TAOS_RES *result1 = taos_query(taos, qstr);
if (result1 == NULL || taos_errno(result1) != 0) {
printf("failed to insert row, reason:%s\n", taos_errstr(result1));
printf("failed to insert row, reason:%s\n", taos_errstr(result1));
taos_free_result(result1);
exit(1);
} else {
@ -107,7 +107,7 @@ void Test(TAOS *taos, char *qstr, int index) {
sprintf(qstr, "SELECT * FROM m1");
result = taos_query(taos, qstr);
if (result == NULL || taos_errno(result) != 0) {
printf("failed to select, reason:%s\n", taos_errstr(result));
printf("failed to select, reason:%s\n", taos_errstr(result));
taos_free_result(result);
exit(1);
}
@ -43,7 +43,7 @@ int main(int argc, char *argv[])
taos_free_result(result);
// create table
const char* sql = "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10))";
const char* sql = "create table m1 (ts timestamp, b bool, v1 tinyint, v2 smallint, v4 int, v8 bigint, f4 float, f8 double, bin binary(40), blob nchar(10), varbin varbinary(16))";
result = taos_query(taos, sql);
code = taos_errno(result);
if (code != 0) {
@ -68,6 +68,7 @@ int main(int argc, char *argv[])
double f8;
char bin[40];
char blob[80];
int8_t varbin[16];
} v = {0};
int32_t boolLen = sizeof(int8_t);
@ -80,7 +81,7 @@ int main(int argc, char *argv[])
int32_t ncharLen = 30;
stmt = taos_stmt_init(taos);
TAOS_MULTI_BIND params[11];
params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
params[0].buffer_length = sizeof(v.ts);
params[0].buffer = &v.ts;
@ -152,9 +153,19 @@ int main(int argc, char *argv[])
params[9].is_null = NULL;
params[9].num = 1;
int8_t tmp[16] = {'a', 0, 1, 13, '1'};
int32_t vbinLen = 5;
memcpy(v.varbin, tmp, sizeof(v.varbin));
params[10].buffer_type = TSDB_DATA_TYPE_VARBINARY;
params[10].buffer_length = sizeof(v.varbin);
params[10].buffer = v.varbin;
params[10].length = &vbinLen;
params[10].is_null = NULL;
params[10].num = 1;
char is_null = 1;
sql = "insert into m1 values(?,?,?,?,?,?,?,?,?,?)";
sql = "insert into m1 values(?,?,?,?,?,?,?,?,?,?,?)";
code = taos_stmt_prepare(stmt, sql, 0);
if (code != 0){
printf("failed to execute taos_stmt_prepare. code:0x%x\n", code);
@ -162,7 +173,7 @@ int main(int argc, char *argv[])
v.ts = 1591060628000;
for (int i = 0; i < 10; ++i) {
v.ts += 1;
for (int j = 1; j < 10; ++j) {
for (int j = 1; j < 11; ++j) {
params[j].is_null = ((i == j) ? &is_null : 0);
}
v.b = (int8_t)i % 2;
@ -216,7 +227,7 @@ int main(int argc, char *argv[])
printf("expect two rows, but %d rows are fetched\n", rows);
}
// taos_free_result(result);
taos_stmt_close(stmt);
return 0;
@ -280,7 +280,7 @@ void consume_repeatly(tmq_t* tmq) {
code = tmq_offset_seek(tmq, topic_name, p->vgId, p->begin);
if (code != 0) {
fprintf(stderr, "failed to seek to %ld, reason:%s", p->begin, tmq_err2str(code));
fprintf(stderr, "failed to seek to %d, reason:%s", (int)p->begin, tmq_err2str(code));
}
}
@ -292,14 +292,14 @@ DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); //Commit the msgs offset + 1
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment);
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
@ -152,6 +152,8 @@ enum {
STREAM_INPUT__DATA_RETRIEVE,
STREAM_INPUT__GET_RES,
STREAM_INPUT__CHECKPOINT,
STREAM_INPUT__CHECKPOINT_TRIGGER,
STREAM_INPUT__TRANS_STATE,
STREAM_INPUT__REF_DATA_BLOCK,
STREAM_INPUT__DESTROY,
};
@ -168,7 +170,9 @@ typedef enum EStreamType {
STREAM_PULL_DATA,
STREAM_PULL_OVER,
STREAM_FILL_OVER,
STREAM_CHECKPOINT,
STREAM_CREATE_CHILD_TABLE,
STREAM_TRANS_STATE,
} EStreamType;
#pragma pack(push, 1)
@ -179,6 +179,8 @@ int32_t getJsonValueLen(const char* data);
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData);
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, bool trimValue);
int32_t colDataCopyNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
uint32_t numOfRows, bool isNull);
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
const SColumnInfoData* pSource, int32_t numOfRow2);
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows,
@ -241,7 +243,7 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols);
const char* blockDecode(SSDataBlock* pBlock, const char* pData);
// for debug
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf, const char* taskIdStr);
int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pDataBlocks, const STSchema* pTSchema, int64_t uid, int32_t vgId,
tb_uid_t suid);
@ -102,6 +102,11 @@ extern uint16_t tsMonitorPort;
extern int32_t tsMonitorMaxLogs;
extern bool tsMonitorComp;
// audit
extern bool tsEnableAudit;
extern char tsAuditFqdn[];
extern uint16_t tsAuditPort;
// telem
extern bool tsEnableTelem;
extern int32_t tsTelemInterval;
@ -130,6 +135,7 @@ extern bool tsKeepColumnName;
extern bool tsEnableQueryHb;
extern bool tsEnableScience;
extern bool tsTtlChangeOnWrite;
extern int32_t tsTtlFlushThreshold;
extern int32_t tsRedirectPeriod;
extern int32_t tsRedirectFactor;
extern int32_t tsRedirectMaxPeriod;
@ -161,6 +167,7 @@ extern char tsCompressor[];
// tfs
extern int32_t tsDiskCfgNum;
extern SDiskCfg tsDiskCfg[];
extern int64_t tsMinDiskFreeSize;
// udf
extern bool tsStartUdfd;
@ -185,7 +192,9 @@ extern int32_t tsTransPullupInterval;
extern int32_t tsMqRebalanceInterval;
extern int32_t tsStreamCheckpointTickInterval;
extern int32_t tsTtlUnit;
extern int32_t tsTtlPushIntervalSec;
extern int32_t tsTtlBatchDropNum;
extern int32_t tsTrimVDbIntervalSec;
extern int32_t tsGrantHBInterval;
extern int32_t tsUptimeInterval;
@ -199,6 +208,7 @@ extern bool tsFilterScalarMode;
extern int32_t tsKeepTimeOffset;
extern int32_t tsMaxStreamBackendCache;
extern int32_t tsPQSortMemThreshold;
extern int32_t tsResolveFQDNRetryTime;
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
@ -77,7 +77,8 @@ static inline bool tmsgIsValid(tmsg_t type) {
}
static inline bool vnodeIsMsgBlock(tmsg_t type) {
return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) ||
(type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM) || (type == TDMT_VND_COMMIT) ||
(type == TDMT_SYNC_CONFIG_CHANGE);
}
static inline bool syncUtilUserCommit(tmsg_t msgType) {
@ -212,6 +213,215 @@ typedef enum _mgmt_table {
#define TD_REQ_FROM_APP 0
#define TD_REQ_FROM_TAOX 1
typedef enum ENodeType {
// Syntax nodes are used in parser and planner module, and some are also used in executor module, such as COLUMN,
// VALUE, OPERATOR, FUNCTION and so on.
QUERY_NODE_COLUMN = 1,
QUERY_NODE_VALUE,
QUERY_NODE_OPERATOR,
QUERY_NODE_LOGIC_CONDITION,
QUERY_NODE_FUNCTION,
QUERY_NODE_REAL_TABLE,
QUERY_NODE_TEMP_TABLE,
QUERY_NODE_JOIN_TABLE,
QUERY_NODE_GROUPING_SET,
QUERY_NODE_ORDER_BY_EXPR,
QUERY_NODE_LIMIT,
QUERY_NODE_STATE_WINDOW,
QUERY_NODE_SESSION_WINDOW,
QUERY_NODE_INTERVAL_WINDOW,
QUERY_NODE_NODE_LIST,
QUERY_NODE_FILL,
QUERY_NODE_RAW_EXPR, // Only be used in parser module.
QUERY_NODE_TARGET,
QUERY_NODE_DATABLOCK_DESC,
QUERY_NODE_SLOT_DESC,
QUERY_NODE_COLUMN_DEF,
QUERY_NODE_DOWNSTREAM_SOURCE,
QUERY_NODE_DATABASE_OPTIONS,
QUERY_NODE_TABLE_OPTIONS,
QUERY_NODE_INDEX_OPTIONS,
QUERY_NODE_EXPLAIN_OPTIONS,
QUERY_NODE_STREAM_OPTIONS,
QUERY_NODE_LEFT_VALUE,
QUERY_NODE_COLUMN_REF,
QUERY_NODE_WHEN_THEN,
QUERY_NODE_CASE_WHEN,
QUERY_NODE_EVENT_WINDOW,
QUERY_NODE_HINT,
// Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR = 100,
QUERY_NODE_SELECT_STMT,
QUERY_NODE_VNODE_MODIFY_STMT,
QUERY_NODE_CREATE_DATABASE_STMT,
QUERY_NODE_DROP_DATABASE_STMT,
QUERY_NODE_ALTER_DATABASE_STMT,
QUERY_NODE_FLUSH_DATABASE_STMT,
QUERY_NODE_TRIM_DATABASE_STMT,
QUERY_NODE_CREATE_TABLE_STMT,
QUERY_NODE_CREATE_SUBTABLE_CLAUSE,
QUERY_NODE_CREATE_MULTI_TABLES_STMT,
QUERY_NODE_DROP_TABLE_CLAUSE,
QUERY_NODE_DROP_TABLE_STMT,
QUERY_NODE_DROP_SUPER_TABLE_STMT,
QUERY_NODE_ALTER_TABLE_STMT,
QUERY_NODE_ALTER_SUPER_TABLE_STMT,
QUERY_NODE_CREATE_USER_STMT,
QUERY_NODE_ALTER_USER_STMT,
QUERY_NODE_DROP_USER_STMT,
QUERY_NODE_USE_DATABASE_STMT,
QUERY_NODE_CREATE_DNODE_STMT,
QUERY_NODE_DROP_DNODE_STMT,
QUERY_NODE_ALTER_DNODE_STMT,
QUERY_NODE_CREATE_INDEX_STMT,
QUERY_NODE_DROP_INDEX_STMT,
QUERY_NODE_CREATE_QNODE_STMT,
QUERY_NODE_DROP_QNODE_STMT,
QUERY_NODE_CREATE_BNODE_STMT,
QUERY_NODE_DROP_BNODE_STMT,
QUERY_NODE_CREATE_SNODE_STMT,
QUERY_NODE_DROP_SNODE_STMT,
QUERY_NODE_CREATE_MNODE_STMT,
QUERY_NODE_DROP_MNODE_STMT,
QUERY_NODE_CREATE_TOPIC_STMT,
QUERY_NODE_DROP_TOPIC_STMT,
QUERY_NODE_DROP_CGROUP_STMT,
QUERY_NODE_ALTER_LOCAL_STMT,
QUERY_NODE_EXPLAIN_STMT,
QUERY_NODE_DESCRIBE_STMT,
QUERY_NODE_RESET_QUERY_CACHE_STMT,
QUERY_NODE_COMPACT_DATABASE_STMT,
QUERY_NODE_CREATE_FUNCTION_STMT,
QUERY_NODE_DROP_FUNCTION_STMT,
QUERY_NODE_CREATE_STREAM_STMT,
QUERY_NODE_DROP_STREAM_STMT,
QUERY_NODE_BALANCE_VGROUP_STMT,
QUERY_NODE_MERGE_VGROUP_STMT,
QUERY_NODE_REDISTRIBUTE_VGROUP_STMT,
QUERY_NODE_SPLIT_VGROUP_STMT,
QUERY_NODE_SYNCDB_STMT,
QUERY_NODE_GRANT_STMT,
QUERY_NODE_REVOKE_STMT,
QUERY_NODE_SHOW_DNODES_STMT,
QUERY_NODE_SHOW_MNODES_STMT,
QUERY_NODE_SHOW_MODULES_STMT,
QUERY_NODE_SHOW_QNODES_STMT,
QUERY_NODE_SHOW_SNODES_STMT,
QUERY_NODE_SHOW_BNODES_STMT,
QUERY_NODE_SHOW_CLUSTER_STMT,
QUERY_NODE_SHOW_DATABASES_STMT,
QUERY_NODE_SHOW_FUNCTIONS_STMT,
QUERY_NODE_SHOW_INDEXES_STMT,
QUERY_NODE_SHOW_STABLES_STMT,
QUERY_NODE_SHOW_STREAMS_STMT,
QUERY_NODE_SHOW_TABLES_STMT,
QUERY_NODE_SHOW_TAGS_STMT,
QUERY_NODE_SHOW_USERS_STMT,
QUERY_NODE_SHOW_LICENCES_STMT,
QUERY_NODE_SHOW_VGROUPS_STMT,
QUERY_NODE_SHOW_TOPICS_STMT,
QUERY_NODE_SHOW_CONSUMERS_STMT,
QUERY_NODE_SHOW_CONNECTIONS_STMT,
QUERY_NODE_SHOW_QUERIES_STMT,
QUERY_NODE_SHOW_APPS_STMT,
QUERY_NODE_SHOW_VARIABLES_STMT,
QUERY_NODE_SHOW_DNODE_VARIABLES_STMT,
QUERY_NODE_SHOW_TRANSACTIONS_STMT,
QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
QUERY_NODE_SHOW_VNODES_STMT,
QUERY_NODE_SHOW_USER_PRIVILEGES_STMT,
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
QUERY_NODE_SHOW_CREATE_STABLE_STMT,
QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
QUERY_NODE_SHOW_SCORES_STMT,
QUERY_NODE_SHOW_TABLE_TAGS_STMT,
QUERY_NODE_KILL_CONNECTION_STMT,
QUERY_NODE_KILL_QUERY_STMT,
QUERY_NODE_KILL_TRANSACTION_STMT,
QUERY_NODE_DELETE_STMT,
QUERY_NODE_INSERT_STMT,
QUERY_NODE_QUERY,
QUERY_NODE_SHOW_DB_ALIVE_STMT,
QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT,
QUERY_NODE_BALANCE_VGROUP_LEADER_STMT,
QUERY_NODE_RESTORE_DNODE_STMT,
QUERY_NODE_RESTORE_QNODE_STMT,
QUERY_NODE_RESTORE_MNODE_STMT,
QUERY_NODE_RESTORE_VNODE_STMT,
QUERY_NODE_PAUSE_STREAM_STMT,
QUERY_NODE_RESUME_STREAM_STMT,
// logic plan node
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
QUERY_NODE_LOGIC_PLAN_JOIN,
QUERY_NODE_LOGIC_PLAN_AGG,
QUERY_NODE_LOGIC_PLAN_PROJECT,
QUERY_NODE_LOGIC_PLAN_VNODE_MODIFY,
QUERY_NODE_LOGIC_PLAN_EXCHANGE,
QUERY_NODE_LOGIC_PLAN_MERGE,
QUERY_NODE_LOGIC_PLAN_WINDOW,
QUERY_NODE_LOGIC_PLAN_FILL,
QUERY_NODE_LOGIC_PLAN_SORT,
QUERY_NODE_LOGIC_PLAN_PARTITION,
QUERY_NODE_LOGIC_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_LOGIC_PLAN_INTERP_FUNC,
QUERY_NODE_LOGIC_SUBPLAN,
QUERY_NODE_LOGIC_PLAN,
QUERY_NODE_LOGIC_PLAN_GROUP_CACHE,
QUERY_NODE_LOGIC_PLAN_DYN_QUERY_CTRL,
// physical plan node
QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN = 1100,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN,
QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN,
QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN,
QUERY_NODE_PHYSICAL_PLAN_PROJECT,
QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN,
QUERY_NODE_PHYSICAL_PLAN_HASH_AGG,
QUERY_NODE_PHYSICAL_PLAN_EXCHANGE,
QUERY_NODE_PHYSICAL_PLAN_MERGE,
QUERY_NODE_PHYSICAL_PLAN_SORT,
QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT,
QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_FILL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION,
QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE,
QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE,
QUERY_NODE_PHYSICAL_PLAN_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC,
QUERY_NODE_PHYSICAL_PLAN_DISPATCH,
QUERY_NODE_PHYSICAL_PLAN_INSERT,
QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT,
QUERY_NODE_PHYSICAL_PLAN_DELETE,
QUERY_NODE_PHYSICAL_SUBPLAN,
QUERY_NODE_PHYSICAL_PLAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN,
QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT,
QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN,
QUERY_NODE_PHYSICAL_PLAN_GROUP_CACHE,
QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL
} ENodeType;
typedef struct {
int32_t vgId;
char* dbFName;
@ -741,6 +951,10 @@ typedef struct STimeWindow {
TSKEY ekey;
} STimeWindow;
typedef struct SQueryHint {
bool batchScan;
} SQueryHint;
typedef struct {
int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed
int32_t tsLen; // total length of ts comp block
@ -759,12 +973,18 @@ typedef struct {
int64_t offset;
} SInterval;
typedef struct {
int32_t code;
typedef struct STbVerInfo {
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t sversion;
int32_t tversion;
} STbVerInfo;
typedef struct {
int32_t code;
int64_t affectedRows;
SArray* tbVerInfo; // STbVerInfo
} SQueryTableRsp;
int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
@ -939,6 +1159,9 @@ int32_t tDeserializeSVTrimDbReq(void* buf, int32_t bufLen, SVTrimDbReq* pReq);
typedef struct {
int32_t timestampSec;
int32_t ttlDropMaxCount;
int32_t nUids;
SArray* pTbUids;
} SVDropTtlTableReq;
int32_t tSerializeSVDropTtlTableReq(void* buf, int32_t bufLen, SVDropTtlTableReq* pReq);
@ -1163,6 +1386,9 @@ typedef struct {
int32_t vgId;
int8_t syncState;
int8_t syncRestore;
int64_t syncTerm;
int64_t roleTimeMs;
int64_t startTimeMs;
int8_t syncCanRead;
int64_t cacheUsage;
int64_t numOfTables;
@ -1176,12 +1402,13 @@ typedef struct {
int64_t numOfBatchInsertReqs;
int64_t numOfBatchInsertSuccessReqs;
int32_t numOfCachedTables;
int32_t learnerProgress; // use one reserved
} SVnodeLoad;
typedef struct {
int8_t syncState;
int64_t syncTerm;
int8_t syncRestore;
int64_t roleTimeMs;
} SMnodeLoad;
@ -1191,6 +1418,7 @@ typedef struct {
int64_t numOfProcessedCQuery;
int64_t numOfProcessedFetch;
int64_t numOfProcessedDrop;
int64_t numOfProcessedNotify;
int64_t numOfProcessedHb;
int64_t numOfProcessedDelete;
int64_t cacheDataSize;
@ -1317,6 +1545,7 @@ typedef struct {
int8_t learnerReplica;
int8_t learnerSelfIndex;
SReplica learnerReplicas[TSDB_MAX_LEARNER_REPLICA];
int32_t changeVersion;
} SCreateVnodeReq;
int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq);
@ -1391,7 +1620,8 @@ typedef struct {
int8_t learnerSelfIndex;
int8_t learnerReplica;
SReplica learnerReplicas[TSDB_MAX_LEARNER_REPLICA];
int32_t changeVersion;
} SAlterVnodeReplicaReq, SAlterVnodeTypeReq, SCheckLearnCatchupReq;
int32_t tSerializeSAlterVnodeReplicaReq(void* buf, int32_t bufLen, SAlterVnodeReplicaReq* pReq);
int32_t tDeserializeSAlterVnodeReplicaReq(void* buf, int32_t bufLen, SAlterVnodeReplicaReq* pReq);
@ -1409,7 +1639,8 @@ typedef struct {
int32_t dstVgId;
uint32_t hashBegin;
uint32_t hashEnd;
int32_t changeVersion;
int32_t reserved;
} SAlterVnodeHashRangeReq;
int32_t tSerializeSAlterVnodeHashRangeReq(void* buf, int32_t bufLen, SAlterVnodeHashRangeReq* pReq);
@ -1831,12 +2062,26 @@ typedef struct {
int32_t tversion;
} SResReadyRsp;
typedef struct SOperatorParam {
int32_t opType;
int32_t downstreamIdx;
void* value;
SArray* pChildren; //SArray<SOperatorParam*>
} SOperatorParam;
typedef struct STableScanOperatorParam {
bool tableSeq;
SArray* pUidList;
} STableScanOperatorParam;
typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t taskId;
int32_t execId;
SOperatorParam* pOpParam;
} SResFetchReq;
int32_t tSerializeSResFetchReq(void* buf, int32_t bufLen, SResFetchReq* pReq);
@ -1915,8 +2160,24 @@ typedef struct {
int32_t tSerializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
int32_t tDeserializeSTaskDropReq(void* buf, int32_t bufLen, STaskDropReq* pReq);
typedef enum {
TASK_NOTIFY_FINISHED = 1,
} ETaskNotifyType;
typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t taskId;
int64_t refId;
int32_t execId;
ETaskNotifyType type;
} STaskNotifyReq;
int32_t tSerializeSTaskNotifyReq(void* buf, int32_t bufLen, STaskNotifyReq* pReq);
int32_t tDeserializeSTaskNotifyReq(void* buf, int32_t bufLen, STaskNotifyReq* pReq);
int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
int32_t tDeserializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
@ -65,7 +65,7 @@ enum {
#define TD_NEW_MSG_SEG(TYPE) TYPE = ((TYPE##_SEG_CODE) << 8),
#define TD_DEF_MSG_TYPE(TYPE, MSG, REQ, RSP) TYPE, TYPE##_RSP,
enum { // WARN: new msg should be appended to segment tail
#endif
TD_NEW_MSG_SEG(TDMT_DND_MSG)
TD_DEF_MSG_TYPE(TDMT_DND_CREATE_MNODE, "dnode-create-mnode", NULL, NULL)
@ -85,18 +85,19 @@ enum {
TD_DEF_MSG_TYPE(TDMT_DND_MAX_MSG, "dnd-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_ALTER_MNODE_TYPE, "dnode-alter-mnode-type", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_ALTER_VNODE_TYPE, "dnode-alter-vnode-type", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP, "dnode-check-vnode-learner-catchup", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_MND_MSG)
TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_ACCT, "create-acct", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_ACCT, "alter-acct", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DROP_ACCT, "drop-acct", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_USER, "create-user", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_USER, "alter-user", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DROP_USER, "drop-user", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GET_USER_AUTH, "get-user-auth", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_DNODE, "create-dnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CONFIG_DNODE, "config-dnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DROP_DNODE, "drop-dnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_MNODE, "create-mnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_MNODE, "alter-mnode", NULL, NULL)
@ -181,6 +182,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_RESTORE_DNODE, "restore-dnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_PAUSE_STREAM, "pause-stream", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_RESUME_STREAM, "resume-stream", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TRIM_DB_TIMER, "trim-db-tmr", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_MSG)
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
@ -242,6 +244,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_SCH_DROP_TASK, "drop-task", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SCH_EXPLAIN, "explain", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SCH_LINK_BROKEN, "link-broken", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SCH_TASK_NOTIFY, "task-notify", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SCH_MAX_MSG, "sch-max", NULL, NULL)
@ -254,7 +257,6 @@ enum {
TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_SCAN_HISTORY, "stream-scan-history", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_SCAN_HISTORY_FINISH, "stream-scan-history-finish", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TRANSFER_STATE, "stream-transfer-state", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECK, "stream-task-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECKPOINT, "stream-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_REPORT_CHECKPOINT, "stream-report-checkpoint", NULL, NULL)
@ -296,7 +298,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_SYNC_PRE_SNAPSHOT_REPLY, "sync-pre-snapshot-reply", NULL, NULL) // no longer used
TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_FORCE_FOLLOWER, "sync-force-become-follower", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_STREAM_MSG)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_TRIGGER, "vnode-stream-trigger", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_STREAM_SCAN_HISTORY, "vnode-stream-scan-history", NULL, NULL)


@ -113,7 +113,7 @@
#define TK_TABLE_PREFIX 95
#define TK_TABLE_SUFFIX 96
#define TK_NK_COLON 97
#define TK_MAX_SPEED 98
#define TK_BWLIMIT 98
#define TK_START 99
#define TK_TIMESTAMP 100
#define TK_END 101
@ -276,91 +276,96 @@
#define TK_JOIN 258
#define TK_INNER 259
#define TK_SELECT 260
#define TK_DISTINCT 261
#define TK_WHERE 262
#define TK_PARTITION 263
#define TK_BY 264
#define TK_SESSION 265
#define TK_STATE_WINDOW 266
#define TK_EVENT_WINDOW 267
#define TK_SLIDING 268
#define TK_FILL 269
#define TK_VALUE 270
#define TK_VALUE_F 271
#define TK_NONE 272
#define TK_PREV 273
#define TK_NULL_F 274
#define TK_LINEAR 275
#define TK_NEXT 276
#define TK_HAVING 277
#define TK_RANGE 278
#define TK_EVERY 279
#define TK_ORDER 280
#define TK_SLIMIT 281
#define TK_SOFFSET 282
#define TK_LIMIT 283
#define TK_OFFSET 284
#define TK_ASC 285
#define TK_NULLS 286
#define TK_ABORT 287
#define TK_AFTER 288
#define TK_ATTACH 289
#define TK_BEFORE 290
#define TK_BEGIN 291
#define TK_BITAND 292
#define TK_BITNOT 293
#define TK_BITOR 294
#define TK_BLOCKS 295
#define TK_CHANGE 296
#define TK_COMMA 297
#define TK_CONCAT 298
#define TK_CONFLICT 299
#define TK_COPY 300
#define TK_DEFERRED 301
#define TK_DELIMITERS 302
#define TK_DETACH 303
#define TK_DIVIDE 304
#define TK_DOT 305
#define TK_EACH 306
#define TK_FAIL 307
#define TK_FILE 308
#define TK_FOR 309
#define TK_GLOB 310
#define TK_ID 311
#define TK_IMMEDIATE 312
#define TK_IMPORT 313
#define TK_INITIALLY 314
#define TK_INSTEAD 315
#define TK_ISNULL 316
#define TK_KEY 317
#define TK_MODULES 318
#define TK_NK_BITNOT 319
#define TK_NK_SEMI 320
#define TK_NOTNULL 321
#define TK_OF 322
#define TK_PLUS 323
#define TK_PRIVILEGE 324
#define TK_RAISE 325
#define TK_RESTRICT 326
#define TK_ROW 327
#define TK_SEMI 328
#define TK_STAR 329
#define TK_STATEMENT 330
#define TK_STRICT 331
#define TK_STRING 332
#define TK_TIMES 333
#define TK_VALUES 334
#define TK_VARIABLE 335
#define TK_VIEW 336
#define TK_WAL 337
#define TK_NK_HINT 261
#define TK_DISTINCT 262
#define TK_WHERE 263
#define TK_PARTITION 264
#define TK_BY 265
#define TK_SESSION 266
#define TK_STATE_WINDOW 267
#define TK_EVENT_WINDOW 268
#define TK_SLIDING 269
#define TK_FILL 270
#define TK_VALUE 271
#define TK_VALUE_F 272
#define TK_NONE 273
#define TK_PREV 274
#define TK_NULL_F 275
#define TK_LINEAR 276
#define TK_NEXT 277
#define TK_HAVING 278
#define TK_RANGE 279
#define TK_EVERY 280
#define TK_ORDER 281
#define TK_SLIMIT 282
#define TK_SOFFSET 283
#define TK_LIMIT 284
#define TK_OFFSET 285
#define TK_ASC 286
#define TK_NULLS 287
#define TK_ABORT 288
#define TK_AFTER 289
#define TK_ATTACH 290
#define TK_BEFORE 291
#define TK_BEGIN 292
#define TK_BITAND 293
#define TK_BITNOT 294
#define TK_BITOR 295
#define TK_BLOCKS 296
#define TK_CHANGE 297
#define TK_COMMA 298
#define TK_CONCAT 299
#define TK_CONFLICT 300
#define TK_COPY 301
#define TK_DEFERRED 302
#define TK_DELIMITERS 303
#define TK_DETACH 304
#define TK_DIVIDE 305
#define TK_DOT 306
#define TK_EACH 307
#define TK_FAIL 308
#define TK_FILE 309
#define TK_FOR 310
#define TK_GLOB 311
#define TK_ID 312
#define TK_IMMEDIATE 313
#define TK_IMPORT 314
#define TK_INITIALLY 315
#define TK_INSTEAD 316
#define TK_ISNULL 317
#define TK_KEY 318
#define TK_MODULES 319
#define TK_NK_BITNOT 320
#define TK_NK_SEMI 321
#define TK_NOTNULL 322
#define TK_OF 323
#define TK_PLUS 324
#define TK_PRIVILEGE 325
#define TK_RAISE 326
#define TK_RESTRICT 327
#define TK_ROW 328
#define TK_SEMI 329
#define TK_STAR 330
#define TK_STATEMENT 331
#define TK_STRICT 332
#define TK_STRING 333
#define TK_TIMES 334
#define TK_VALUES 335
#define TK_VARIABLE 336
#define TK_VIEW 337
#define TK_WAL 338
#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601
#define TK_NK_ILLEGAL 602
#define TK_NK_HEX 603 // hex number 0x123
#define TK_NK_OCT 604 // oct number
#define TK_NK_BIN 605 // bin format data 0b111
#define TK_BATCH_SCAN 606
#define TK_NO_BATCH_SCAN 607
#define TK_NK_NIL 65535


@ -269,8 +269,8 @@ typedef struct {
(IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))
#define IS_VAR_DATA_TYPE(t) \
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR))
#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
#define IS_VALID_SMALLINT(_t) ((_t) >= INT16_MIN && (_t) <= INT16_MAX)
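The IS_VAR_DATA_TYPE/IS_STR_DATA_TYPE change above (it recurs in the UDF header later in this diff) widens the variable-length family to include TSDB_DATA_TYPE_VARBINARY. A hedged usage sketch, assuming the standard var-data accessors and a surrounding column struct:

/* Illustrative only: VARBINARY now takes the same length-prefixed path as
 * VARCHAR/NCHAR/JSON/GEOMETRY; var-data cells carry a VarDataLenT header. */
if (IS_VAR_DATA_TYPE(pCol->type)) {
  int32_t len = varDataLen(pData);     /* payload length            */
  memcpy(buf, varDataVal(pData), len); /* copy payload, skip header */
}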


@ -0,0 +1,46 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_AUDIT_H_
#define _TD_AUDIT_H_
#include "tarray.h"
#include "tdef.h"
#include "tlog.h"
#include "tmsg.h"
#include "tjson.h"
#include "tmsgcb.h"
#include "trpc.h"
#include "mnode.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
const char *server;
uint16_t port;
bool comp;
} SAuditCfg;
int32_t auditInit(const SAuditCfg *pCfg);
void auditSend(SJson *pJson);
void auditRecord(SRpcMsg *pReq, int64_t clusterId, char *operation, char *target1, char *target2, char *detail);
#ifdef __cplusplus
}
#endif
#endif /*_TD_AUDIT_H_*/
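audit.h is new in this commit. A hypothetical call sequence against the three declarations above; the server, port, and string arguments are placeholders:

SAuditCfg cfg = {.server = "audit.example.com", .port = 6043, .comp = false};
if (auditInit(&cfg) != 0) {
  /* handle init failure */
}
/* later, on a DDL request: record who did what to which target */
auditRecord(pReq, clusterId, "createUser", "user1", "", "{\"detail\":1}");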


@ -59,7 +59,7 @@ typedef struct SDataSinkMgtCfg {
uint32_t maxDataBlockNumPerQuery;
} SDataSinkMgtCfg;
int32_t dsDataSinkMgtInit(SDataSinkMgtCfg* cfg, SStorageAPI* pAPI);
int32_t dsDataSinkMgtInit(SDataSinkMgtCfg* cfg, SStorageAPI* pAPI, void** ppSinkManager);
typedef struct SInputData {
const struct SSDataBlock* pData;
@ -83,7 +83,7 @@ typedef struct SOutputData {
* @param pHandle output
* @return error code
*/
int32_t dsCreateDataSinker(const SDataSinkNode* pDataSink, DataSinkHandle* pHandle, void* pParam, const char* id);
int32_t dsCreateDataSinker(void* pSinkManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle, void* pParam, const char* id);
int32_t dsDataSinkGetCacheSize(SDataSinkStat* pStat);
@ -97,6 +97,8 @@ int32_t dsPutDataBlock(DataSinkHandle handle, const SInputData* pInput, bool* pC
void dsEndPut(DataSinkHandle handle, uint64_t useconds);
void dsReset(DataSinkHandle handle);
/**
* Get the length of the data returned by the next call to dsGetDataBlock.
* @param handle

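dsDataSinkMgtInit now hands back an explicit manager through ppSinkManager, and dsCreateDataSinker takes it as its first argument, replacing module-global sink state. A sketch of the updated call shape; the declarations of cfg, pAPI, pDataSink, pParam, and id are assumed from surrounding code:

void* pSinkManager = NULL;
int32_t code = dsDataSinkMgtInit(&cfg, pAPI, &pSinkManager);
if (code == 0) {
  DataSinkHandle handle = NULL;
  code = dsCreateDataSinker(pSinkManager, pDataSink, &handle, pParam, id);
}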

@ -130,6 +130,10 @@ int32_t qSetSMAInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks,
*/
int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd);
bool qIsDynamicExecTask(qTaskInfo_t tinfo);
void qUpdateOperatorParam(qTaskInfo_t tinfo, void* pParam);
/**
* Create the exec task object according to task json
* @param readHandle
@ -150,7 +154,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
* @return
*/
int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion,
int32_t* tversion);
int32_t* tversion, int32_t idx);
/**
* The main task execution function, including query on both table and multiple tables,

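qGetQueryTableSchemaVersion gains an idx argument; with multi-table plans a task may reference several tables, so the caller selects which one to report. A hedged sketch in which idx is assumed to be a zero-based table index:

char    dbName[TSDB_DB_NAME_LEN] = {0};
char    tbName[TSDB_TABLE_NAME_LEN] = {0};
int32_t sversion = 0, tversion = 0;
/* idx = 0: first (or only) table referenced by the task; an assumption */
qGetQueryTableSchemaVersion(tinfo, dbName, tbName, &sversion, &tversion, 0);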

@ -98,6 +98,18 @@ typedef struct SMTbCursor {
int8_t paused;
} SMTbCursor;
typedef struct SMCtbCursor {
SMeta *pMeta;
void *pCur;
tb_uid_t suid;
void *pKey;
void *pVal;
int kLen;
int vLen;
int8_t paused;
int lock;
} SMCtbCursor;
typedef struct SRowBuffPos {
void* pRowBuff;
void* pKey;
@ -278,13 +290,17 @@ typedef struct SStoreMeta {
void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables,
int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) &
// metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta);
int64_t (*getNumOfRowsInMem)(void* pVnode);
/**
int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list);
int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg);
int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list);
*/
SMCtbCursor* (*openCtbCursor)(void *pVnode, tb_uid_t uid, int lock);
int32_t (*resumeCtbCursor)(SMCtbCursor* pCtbCur, int8_t first);
void (*pauseCtbCursor)(SMCtbCursor* pCtbCur);
void (*closeCtbCursor)(SMCtbCursor *pCtbCur);
tb_uid_t (*ctbCursorNext)(SMCtbCursor* pCur);
} SStoreMeta;
typedef struct SStoreMetaReader {

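The new SMCtbCursor and the five vtable entries above let callers iterate the child tables of a super table, with pause/resume so long scans can yield. A hedged iteration sketch; the zero-uid stop condition is an assumption:

SMCtbCursor* pCur = pMeta->openCtbCursor(pVnode, suid, 1 /* lock */);
for (tb_uid_t uid = pMeta->ctbCursorNext(pCur); uid != 0;
     uid = pMeta->ctbCursorNext(pCur)) {
  taosArrayPush(pUidList, &uid); /* collect child-table uids */
}
pMeta->closeCtbCursor(pCur);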

@ -122,6 +122,8 @@ typedef enum EFunctionType {
FUNCTION_TYPE_IROWTS,
FUNCTION_TYPE_ISFILLED,
FUNCTION_TYPE_TAGS,
FUNCTION_TYPE_TBUID,
FUNCTION_TYPE_VGID,
// internal function
FUNCTION_TYPE_SELECT_VALUE = 3750,
@ -157,6 +159,8 @@ typedef enum EFunctionType {
FUNCTION_TYPE_AVG_MERGE,
FUNCTION_TYPE_STDDEV_PARTIAL,
FUNCTION_TYPE_STDDEV_MERGE,
FUNCTION_TYPE_IRATE_PARTIAL,
FUNCTION_TYPE_IRATE_MERGE,
// geometry functions
FUNCTION_TYPE_GEOM_FROM_TEXT = 4250,


@ -109,8 +109,8 @@ typedef uint16_t VarDataLenT; // maxVarDataLen: 65535
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
#define IS_VAR_DATA_TYPE(t) \
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
(((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON) || ((t) == TSDB_DATA_TYPE_GEOMETRY))
#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_VARBINARY) || ((t) == TSDB_DATA_TYPE_NCHAR))
static FORCE_INLINE char *udfColDataGetData(const SUdfColumn *pColumn, int32_t row) {
if (IS_VAR_DATA_TYPE(pColumn->colMeta.type)) {


@ -21,6 +21,7 @@ extern "C" {
#endif
#include "tdef.h"
#include "tmsg.h"
#define nodeType(nodeptr) (((const SNode*)(nodeptr))->type)
#define setNodeType(nodeptr, nodetype) (((SNode*)(nodeptr))->type = (nodetype))
@ -78,208 +79,6 @@ extern "C" {
(list) = NULL; \
} while (0)
typedef enum ENodeType {
// Syntax nodes are used in parser and planner module, and some are also used in executor module, such as COLUMN,
// VALUE, OPERATOR, FUNCTION and so on.
QUERY_NODE_COLUMN = 1,
QUERY_NODE_VALUE,
QUERY_NODE_OPERATOR,
QUERY_NODE_LOGIC_CONDITION,
QUERY_NODE_FUNCTION,
QUERY_NODE_REAL_TABLE,
QUERY_NODE_TEMP_TABLE,
QUERY_NODE_JOIN_TABLE,
QUERY_NODE_GROUPING_SET,
QUERY_NODE_ORDER_BY_EXPR,
QUERY_NODE_LIMIT,
QUERY_NODE_STATE_WINDOW,
QUERY_NODE_SESSION_WINDOW,
QUERY_NODE_INTERVAL_WINDOW,
QUERY_NODE_NODE_LIST,
QUERY_NODE_FILL,
QUERY_NODE_RAW_EXPR, // Only be used in parser module.
QUERY_NODE_TARGET,
QUERY_NODE_DATABLOCK_DESC,
QUERY_NODE_SLOT_DESC,
QUERY_NODE_COLUMN_DEF,
QUERY_NODE_DOWNSTREAM_SOURCE,
QUERY_NODE_DATABASE_OPTIONS,
QUERY_NODE_TABLE_OPTIONS,
QUERY_NODE_INDEX_OPTIONS,
QUERY_NODE_EXPLAIN_OPTIONS,
QUERY_NODE_STREAM_OPTIONS,
QUERY_NODE_LEFT_VALUE,
QUERY_NODE_COLUMN_REF,
QUERY_NODE_WHEN_THEN,
QUERY_NODE_CASE_WHEN,
QUERY_NODE_EVENT_WINDOW,
// Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR = 100,
QUERY_NODE_SELECT_STMT,
QUERY_NODE_VNODE_MODIFY_STMT,
QUERY_NODE_CREATE_DATABASE_STMT,
QUERY_NODE_DROP_DATABASE_STMT,
QUERY_NODE_ALTER_DATABASE_STMT,
QUERY_NODE_FLUSH_DATABASE_STMT,
QUERY_NODE_TRIM_DATABASE_STMT,
QUERY_NODE_CREATE_TABLE_STMT,
QUERY_NODE_CREATE_SUBTABLE_CLAUSE,
QUERY_NODE_CREATE_MULTI_TABLES_STMT,
QUERY_NODE_DROP_TABLE_CLAUSE,
QUERY_NODE_DROP_TABLE_STMT,
QUERY_NODE_DROP_SUPER_TABLE_STMT,
QUERY_NODE_ALTER_TABLE_STMT,
QUERY_NODE_ALTER_SUPER_TABLE_STMT,
QUERY_NODE_CREATE_USER_STMT,
QUERY_NODE_ALTER_USER_STMT,
QUERY_NODE_DROP_USER_STMT,
QUERY_NODE_USE_DATABASE_STMT,
QUERY_NODE_CREATE_DNODE_STMT,
QUERY_NODE_DROP_DNODE_STMT,
QUERY_NODE_ALTER_DNODE_STMT,
QUERY_NODE_CREATE_INDEX_STMT,
QUERY_NODE_DROP_INDEX_STMT,
QUERY_NODE_CREATE_QNODE_STMT,
QUERY_NODE_DROP_QNODE_STMT,
QUERY_NODE_CREATE_BNODE_STMT,
QUERY_NODE_DROP_BNODE_STMT,
QUERY_NODE_CREATE_SNODE_STMT,
QUERY_NODE_DROP_SNODE_STMT,
QUERY_NODE_CREATE_MNODE_STMT,
QUERY_NODE_DROP_MNODE_STMT,
QUERY_NODE_CREATE_TOPIC_STMT,
QUERY_NODE_DROP_TOPIC_STMT,
QUERY_NODE_DROP_CGROUP_STMT,
QUERY_NODE_ALTER_LOCAL_STMT,
QUERY_NODE_EXPLAIN_STMT,
QUERY_NODE_DESCRIBE_STMT,
QUERY_NODE_RESET_QUERY_CACHE_STMT,
QUERY_NODE_COMPACT_DATABASE_STMT,
QUERY_NODE_CREATE_FUNCTION_STMT,
QUERY_NODE_DROP_FUNCTION_STMT,
QUERY_NODE_CREATE_STREAM_STMT,
QUERY_NODE_DROP_STREAM_STMT,
QUERY_NODE_BALANCE_VGROUP_STMT,
QUERY_NODE_MERGE_VGROUP_STMT,
QUERY_NODE_REDISTRIBUTE_VGROUP_STMT,
QUERY_NODE_SPLIT_VGROUP_STMT,
QUERY_NODE_SYNCDB_STMT,
QUERY_NODE_GRANT_STMT,
QUERY_NODE_REVOKE_STMT,
QUERY_NODE_SHOW_DNODES_STMT,
QUERY_NODE_SHOW_MNODES_STMT,
QUERY_NODE_SHOW_MODULES_STMT,
QUERY_NODE_SHOW_QNODES_STMT,
QUERY_NODE_SHOW_SNODES_STMT,
QUERY_NODE_SHOW_BNODES_STMT,
QUERY_NODE_SHOW_CLUSTER_STMT,
QUERY_NODE_SHOW_DATABASES_STMT,
QUERY_NODE_SHOW_FUNCTIONS_STMT,
QUERY_NODE_SHOW_INDEXES_STMT,
QUERY_NODE_SHOW_STABLES_STMT,
QUERY_NODE_SHOW_STREAMS_STMT,
QUERY_NODE_SHOW_TABLES_STMT,
QUERY_NODE_SHOW_TAGS_STMT,
QUERY_NODE_SHOW_USERS_STMT,
QUERY_NODE_SHOW_LICENCES_STMT,
QUERY_NODE_SHOW_VGROUPS_STMT,
QUERY_NODE_SHOW_TOPICS_STMT,
QUERY_NODE_SHOW_CONSUMERS_STMT,
QUERY_NODE_SHOW_CONNECTIONS_STMT,
QUERY_NODE_SHOW_QUERIES_STMT,
QUERY_NODE_SHOW_APPS_STMT,
QUERY_NODE_SHOW_VARIABLES_STMT,
QUERY_NODE_SHOW_DNODE_VARIABLES_STMT,
QUERY_NODE_SHOW_TRANSACTIONS_STMT,
QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
QUERY_NODE_SHOW_VNODES_STMT,
QUERY_NODE_SHOW_USER_PRIVILEGES_STMT,
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
QUERY_NODE_SHOW_CREATE_STABLE_STMT,
QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
QUERY_NODE_SHOW_SCORES_STMT,
QUERY_NODE_SHOW_TABLE_TAGS_STMT,
QUERY_NODE_KILL_CONNECTION_STMT,
QUERY_NODE_KILL_QUERY_STMT,
QUERY_NODE_KILL_TRANSACTION_STMT,
QUERY_NODE_DELETE_STMT,
QUERY_NODE_INSERT_STMT,
QUERY_NODE_QUERY,
QUERY_NODE_SHOW_DB_ALIVE_STMT,
QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT,
QUERY_NODE_BALANCE_VGROUP_LEADER_STMT,
QUERY_NODE_RESTORE_DNODE_STMT,
QUERY_NODE_RESTORE_QNODE_STMT,
QUERY_NODE_RESTORE_MNODE_STMT,
QUERY_NODE_RESTORE_VNODE_STMT,
QUERY_NODE_PAUSE_STREAM_STMT,
QUERY_NODE_RESUME_STREAM_STMT,
// logic plan node
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
QUERY_NODE_LOGIC_PLAN_JOIN,
QUERY_NODE_LOGIC_PLAN_AGG,
QUERY_NODE_LOGIC_PLAN_PROJECT,
QUERY_NODE_LOGIC_PLAN_VNODE_MODIFY,
QUERY_NODE_LOGIC_PLAN_EXCHANGE,
QUERY_NODE_LOGIC_PLAN_MERGE,
QUERY_NODE_LOGIC_PLAN_WINDOW,
QUERY_NODE_LOGIC_PLAN_FILL,
QUERY_NODE_LOGIC_PLAN_SORT,
QUERY_NODE_LOGIC_PLAN_PARTITION,
QUERY_NODE_LOGIC_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_LOGIC_PLAN_INTERP_FUNC,
QUERY_NODE_LOGIC_SUBPLAN,
QUERY_NODE_LOGIC_PLAN,
// physical plan node
QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN = 1100,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN,
QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN,
QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN,
QUERY_NODE_PHYSICAL_PLAN_PROJECT,
QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN,
QUERY_NODE_PHYSICAL_PLAN_HASH_AGG,
QUERY_NODE_PHYSICAL_PLAN_EXCHANGE,
QUERY_NODE_PHYSICAL_PLAN_MERGE,
QUERY_NODE_PHYSICAL_PLAN_SORT,
QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT,
QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_FILL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION,
QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE,
QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE,
QUERY_NODE_PHYSICAL_PLAN_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC,
QUERY_NODE_PHYSICAL_PLAN_DISPATCH,
QUERY_NODE_PHYSICAL_PLAN_INSERT,
QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT,
QUERY_NODE_PHYSICAL_PLAN_DELETE,
QUERY_NODE_PHYSICAL_SUBPLAN,
QUERY_NODE_PHYSICAL_PLAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN,
QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT
} ENodeType;
/**
* The first field of a node of any type is guaranteed to be the ENodeType.
* Hence the type of any node can be gotten by casting it to SNode.


@ -42,10 +42,13 @@ typedef enum EGroupAction {
typedef struct SLogicNode {
ENodeType type;
bool dynamicOp;
bool stmtRoot;
SNodeList* pTargets; // SColumnNode
SNode* pConditions;
SNodeList* pChildren;
struct SLogicNode* pParent;
SNodeList* pHint;
int32_t optimizedFlag;
uint8_t precision;
SNode* pLimit;
@ -107,15 +110,21 @@ typedef struct SScanLogicNode {
bool sortPrimaryKey;
bool igLastNull;
bool groupOrderScan;
bool onlyMetaCtbIdx; // for tag scan with no tbname
} SScanLogicNode;
typedef struct SJoinLogicNode {
SLogicNode node;
EJoinType joinType;
SNode* pMergeCondition;
SNode* pOnConditions;
bool isSingleTableJoin;
SNode* pColEqualOnConditions;
SLogicNode node;
EJoinType joinType;
EJoinAlgorithm joinAlgo;
SNode* pPrimKeyEqCond;
SNode* pColEqCond;
SNode* pTagEqCond;
SNode* pTagOnCond;
SNode* pOtherOnCond;
bool isSingleTableJoin;
bool hasSubQuery;
bool isLowLevelJoin;
} SJoinLogicNode;
typedef struct SAggLogicNode {
@ -154,6 +163,28 @@ typedef struct SInterpFuncLogicNode {
SNode* pTimeSeries; // SColumnNode
} SInterpFuncLogicNode;
typedef struct SGroupCacheLogicNode {
SLogicNode node;
bool grpColsMayBeNull;
bool grpByUid;
bool globalGrp;
bool batchFetch;
SNodeList* pGroupCols;
} SGroupCacheLogicNode;
typedef struct SDynQueryCtrlStbJoin {
bool batchFetch;
SNodeList* pVgList;
SNodeList* pUidList;
bool srcScan[2];
} SDynQueryCtrlStbJoin;
typedef struct SDynQueryCtrlLogicNode {
SLogicNode node;
EDynQueryType qType;
SDynQueryCtrlStbJoin stbJoin;
} SDynQueryCtrlLogicNode;
typedef enum EModifyTableType { MODIFY_TABLE_TYPE_INSERT = 1, MODIFY_TABLE_TYPE_DELETE } EModifyTableType;
typedef struct SVnodeModifyLogicNode {
@ -312,6 +343,7 @@ typedef struct SDataBlockDescNode {
typedef struct SPhysiNode {
ENodeType type;
bool dynamicOp;
EOrder inputTsOrder;
EOrder outputTsOrder;
SDataBlockDescNode* pOutputDataBlockDesc;
@ -334,7 +366,11 @@ typedef struct SScanPhysiNode {
bool groupOrderScan;
} SScanPhysiNode;
typedef SScanPhysiNode STagScanPhysiNode;
typedef struct STagScanPhysiNode {
SScanPhysiNode scan;
bool onlyMetaCtbIdx; //no tbname, tag index not used.
} STagScanPhysiNode;
typedef SScanPhysiNode SBlockDistScanPhysiNode;
typedef struct SLastRowScanPhysiNode {
@ -409,12 +445,50 @@ typedef struct SInterpFuncPhysiNode {
typedef struct SSortMergeJoinPhysiNode {
SPhysiNode node;
EJoinType joinType;
SNode* pMergeCondition;
SNode* pOnConditions;
SNode* pPrimKeyCond;
SNode* pColEqCond;
SNode* pOtherOnCond;
SNodeList* pTargets;
SNode* pColEqualOnConditions;
} SSortMergeJoinPhysiNode;
typedef struct SHashJoinPhysiNode {
SPhysiNode node;
EJoinType joinType;
SNodeList* pOnLeft;
SNodeList* pOnRight;
SNode* pFilterConditions;
SNodeList* pTargets;
SQueryStat inputStat[2];
SNode* pPrimKeyCond;
SNode* pColEqCond;
SNode* pTagEqCond;
} SHashJoinPhysiNode;
typedef struct SGroupCachePhysiNode {
SPhysiNode node;
bool grpColsMayBeNull;
bool grpByUid;
bool globalGrp;
bool batchFetch;
SNodeList* pGroupCols;
} SGroupCachePhysiNode;
typedef struct SStbJoinDynCtrlBasic {
bool batchFetch;
int32_t vgSlot[2];
int32_t uidSlot[2];
bool srcScan[2];
} SStbJoinDynCtrlBasic;
typedef struct SDynQueryCtrlPhysiNode {
SPhysiNode node;
EDynQueryType qType;
union {
SStbJoinDynCtrlBasic stbJoin;
};
} SDynQueryCtrlPhysiNode;
typedef struct SAggPhysiNode {
SPhysiNode node;
SNodeList* pExprs; // these are expression list of group_by_clause and parameter expression of aggregate function
@ -603,6 +677,8 @@ typedef struct SSubplan {
SNode* pTagCond;
SNode* pTagIndexCond;
bool showRewrite;
int32_t rowsThreshold;
bool dynamicRowThreshold;
} SSubplan;
typedef enum EExplainMode { EXPLAIN_MODE_DISABLE = 1, EXPLAIN_MODE_STATIC, EXPLAIN_MODE_ANALYZE } EExplainMode;


@ -116,6 +116,17 @@ typedef struct SLeftValueNode {
ENodeType type;
} SLeftValueNode;
typedef enum EHintOption {
HINT_NO_BATCH_SCAN = 1,
HINT_BATCH_SCAN,
} EHintOption;
typedef struct SHintNode {
ENodeType type;
EHintOption option;
void* value;
} SHintNode;
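SHintNode and EHintOption back the optimizer-hint syntax tokenized via the new TK_NK_HINT earlier in this diff. An illustrative statement; the hint spelling is inferred from the enum names, not confirmed syntax:

/* Hints ride in a special comment immediately after SELECT. */
const char* sql = "SELECT /*+ batch_scan() */ c1, c2 FROM st WHERE ts > now - 1h";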
typedef struct SOperatorNode {
SExprNode node; // QUERY_NODE_OPERATOR
EOperatorType opType;
@ -169,11 +180,27 @@ typedef struct STempTableNode {
SNode* pSubquery;
} STempTableNode;
typedef enum EJoinType { JOIN_TYPE_INNER = 1 } EJoinType;
typedef enum EJoinType {
JOIN_TYPE_INNER = 1,
JOIN_TYPE_LEFT,
JOIN_TYPE_RIGHT,
} EJoinType;
typedef enum EJoinAlgorithm {
JOIN_ALGO_UNKNOWN = 0,
JOIN_ALGO_MERGE,
JOIN_ALGO_HASH,
} EJoinAlgorithm;
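With EJoinType covering LEFT/RIGHT joins and EJoinAlgorithm recording whether the planner picked a merge or hash strategy (see SHashJoinPhysiNode earlier in this diff), a statement like the following becomes representable; illustrative only:

const char* sql = "SELECT a.ts, a.v, b.v FROM t1 a LEFT JOIN t2 b ON a.ts = b.ts";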
typedef enum EDynQueryType {
DYN_QTYPE_STB_HASH = 1,
} EDynQueryType;
typedef struct SJoinTableNode {
STableNode table; // QUERY_NODE_JOIN_TABLE
EJoinType joinType;
bool hasSubQuery;
bool isLowLevelJoin;
SNode* pLeft;
SNode* pRight;
SNode* pOnCond;
@ -289,6 +316,7 @@ typedef struct SSelectStmt {
SLimitNode* pLimit;
SLimitNode* pSlimit;
STimeWindow timeRange;
SNodeList* pHint;
char stmtName[TSDB_TABLE_NAME_LEN];
uint8_t precision;
int32_t selectFuncNum;
@ -470,7 +498,7 @@ int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char*
int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, ECollectColType type, SNodeList** pCols);
typedef bool (*FFuncClassifier)(int32_t funcId);
int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifier classifier, SNodeList** pFuncs);
int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, SNodeList** pFuncs);
int32_t nodesCollectSpecialNodes(SSelectStmt* pSelect, ESqlClause clause, ENodeType type, SNodeList** pNodes);


@ -114,6 +114,7 @@ int32_t smlBuildRow(STableDataCxt* pTableCxt);
int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* kv, int32_t index);
STableDataCxt* smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta);
void clearColValArraySml(SArray* pCols);
int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols,
STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
char* msgBuf, int32_t msgBufLen);


@ -90,11 +90,6 @@ typedef struct SExecResult {
void* res;
} SExecResult;
typedef struct STbVerInfo {
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t sversion;
int32_t tversion;
} STbVerInfo;
#pragma pack(push, 1)
typedef struct SCTableMeta {
@ -212,6 +207,11 @@ typedef struct SQueryNodeStat {
int32_t tableNum; // vg table number, unit is TSDB_TABLE_NUM_UNIT
} SQueryNodeStat;
typedef struct SQueryStat {
int64_t inputRowNum;
int32_t inputRowSize;
} SQueryStat;
int32_t initTaskQueue();
int32_t cleanupTaskQueue();


@ -45,6 +45,7 @@ typedef struct {
uint64_t cqueryProcessed;
uint64_t fetchProcessed;
uint64_t dropProcessed;
uint64_t notifyProcessed;
uint64_t hbProcessed;
uint64_t deleteProcessed;
@ -90,6 +91,8 @@ int32_t qWorkerProcessCancelMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, in
int32_t qWorkerProcessDropMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessNotifyMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_t ts);
int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SDeleteRes *pRes);


@ -95,6 +95,8 @@ int32_t qStartTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p
int32_t qEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qTbnameFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qTbUidFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qVgIdFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
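qTbUidFunction and qVgIdFunction implement the FUNCTION_TYPE_TBUID/FUNCTION_TYPE_VGID pseudo columns added earlier in this diff, following the same shape as qTbnameFunction. A hedged SQL-level illustration; the column spellings are placeholders, not confirmed syntax:

/* Hypothetical: per-row table uid and hosting vgroup id pseudo columns. */
const char* sql = "SELECT tbname, _tb_uid, _vgroup_id FROM st LIMIT 5";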
/* Aggregation functions */
int32_t countScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);


@ -122,6 +122,7 @@ typedef struct {
int8_t type;
int32_t srcVgId;
int32_t srcTaskId;
int32_t childId;
int64_t sourceVer;
int64_t reqId;
@ -188,7 +189,7 @@ int32_t streamInit();
void streamCleanUp();
SStreamQueue* streamQueueOpen(int64_t cap);
void streamQueueClose(SStreamQueue* queue);
void streamQueueClose(SStreamQueue* pQueue, int32_t taskId);
static FORCE_INLINE void streamQueueProcessSuccess(SStreamQueue* queue) {
ASSERT(atomic_load_8(&queue->status) == STREAM_QUEUE__PROCESSING);
@ -251,6 +252,7 @@ typedef struct SStreamChildEpInfo {
int32_t nodeId;
int32_t childId;
int32_t taskId;
int8_t dataAllowed;
SEpSet epSet;
} SStreamChildEpInfo;
@ -272,6 +274,7 @@ typedef struct SStreamStatus {
int8_t schedStatus;
int8_t keepTaskStatus;
bool transferState;
bool appendTranstateBlock; // whether the trans-state block has already been appended; todo: remove it
int8_t timerActive; // timer is active
int8_t pauseAllowed; // allowed task status to be set to be paused
} SStreamStatus;
@ -399,8 +402,9 @@ typedef struct {
typedef struct {
int64_t streamId;
int32_t type;
int32_t taskId;
int32_t dataSrcVgId;
int32_t srcVgId;
int32_t upstreamTaskId;
int32_t upstreamChildId;
int32_t upstreamNodeId;
@ -570,8 +574,6 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
void tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq);
int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTask* pTask, int32_t vgId, int32_t numOfBlocks,
int64_t dstTaskId);
void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
@ -579,6 +581,8 @@ int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
int32_t streamProcessRunReq(SStreamTask* pTask);
int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec);
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code);
void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg);
@ -589,7 +593,6 @@ int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock
bool streamTaskShouldStop(const SStreamStatus* pStatus);
bool streamTaskShouldPause(const SStreamStatus* pStatus);
bool streamTaskIsIdle(const SStreamTask* pTask);
int32_t streamTaskEndScanWAL(SStreamTask* pTask);
SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId);
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize);
@ -626,7 +629,7 @@ int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* p
int32_t streamSourceScanHistoryData(SStreamTask* pTask);
int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);
int32_t streamDispatchTransferStateMsg(SStreamTask* pTask);
int32_t appendTranstateIntoInputQ(SStreamTask* pTask);
// agg level
int32_t streamTaskScanHistoryPrepare(SStreamTask* pTask);
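streamTaskCloseUpstreamInput/streamTaskOpenAllUpstreamInput pair with the new dataAllowed flag in SStreamChildEpInfo, and appendTranstateIntoInputQ replaces the old dispatch-based transfer-state path. A hedged sketch of the assumed flow; allUpstreamsDone is a hypothetical condition:

streamTaskCloseUpstreamInput(pTask, pReq->upstreamTaskId); /* stop intake   */
if (allUpstreamsDone) {                                    /* hypothetical  */
  appendTranstateIntoInputQ(pTask);                        /* hand over     */
  streamTaskOpenAllUpstreamInput(pTask);                   /* resume intake */
}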


@ -101,6 +101,7 @@ typedef struct SSyncCfg {
int32_t myIndex;
SNodeInfo nodeInfo[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
SyncIndex lastIndex;
int32_t changeVersion;
} SSyncCfg;
typedef struct SFsmCbMeta {
@ -239,19 +240,22 @@ typedef struct SSyncState {
ESyncState state;
bool restored;
bool canRead;
int32_t progress;
SyncTerm term;
int64_t roleTimeMs;
int64_t startTimeMs;
} SSyncState;
int32_t syncInit();
void syncCleanUp();
int64_t syncOpen(SSyncInfo* pSyncInfo);
int32_t syncStart(int64_t rid);
void syncStop(int64_t rid);
void syncPreStop(int64_t rid);
void syncPostStop(int64_t rid);
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak, int64_t* seq);
int32_t syncIsCatchUp(int64_t rid);
int32_t syncInit();
void syncCleanUp();
int64_t syncOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion);
int32_t syncStart(int64_t rid);
void syncStop(int64_t rid);
void syncPreStop(int64_t rid);
void syncPostStop(int64_t rid);
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak, int64_t* seq);
int32_t syncCheckMember(int64_t rid);
int32_t syncIsCatchUp(int64_t rid);
ESyncRole syncGetRole(int64_t rid);
int32_t syncProcessMsg(int64_t rid, SRpcMsg* pMsg);
int32_t syncReconfig(int64_t rid, SSyncCfg* pCfg);
@ -269,6 +273,8 @@ SSyncState syncGetState(int64_t rid);
void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet);
const char* syncStr(ESyncState state);
int32_t syncNodeGetConfig(int64_t rid, SSyncCfg *cfg);
#ifdef __cplusplus
}
#endif
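syncOpen now takes the caller's vnode version and syncCheckMember is new. A hedged call-site sketch; SSyncInfo setup and the meaning of vnodeVersion are assumed:

int64_t rid = syncOpen(&syncInfo, vnodeVersion);
if (rid <= 0) return -1;
if (syncStart(rid) != 0) return -1;
if (syncCheckMember(rid) != 0) {
  /* assumed: this node is not (or no longer) a valid replica member */
}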


@ -83,7 +83,7 @@ void taosRemoveDir(const char *dirname);
bool taosDirExist(const char *dirname);
int32_t taosMkDir(const char *dirname);
int32_t taosMulMkDir(const char *dirname);
int32_t taosMulModeMkDir(const char *dirname, int mode);
int32_t taosMulModeMkDir(const char *dirname, int mode, bool checkAccess);
void taosRemoveOldFiles(const char *dirname, int32_t keepDays);
int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen);
int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen);
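taosMulModeMkDir gains a checkAccess flag. A hedged usage sketch; the flag is assumed to verify accessibility when the directory already exists:

if (taosMulModeMkDir("/var/lib/taos/tsdb", 0755, true) != 0) {
  /* creation, or the assumed access check, failed */
}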


@ -76,7 +76,7 @@ int32_t taosUnLockFile(TdFilePtr pFile);
int32_t taosUmaskFile(int32_t maskVal);
int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime);
int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime);
int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno);
int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime);
bool taosCheckExistFile(const char *pathname);
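taosStatFile adds an atime out-parameter alongside size and mtime. A hedged call sketch; whether NULL is accepted for unused outputs is an assumption:

int64_t size = 0;
int32_t mtime = 0, atime = 0;
if (taosStatFile("/var/log/taos/taosdlog.0", &size, &mtime, &atime) != 0) {
  /* stat failed */
}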
