Merge branch '3.0' into szhou/tsdb-neigh-block

This commit is contained in:
shenglian-zhou 2023-08-24 10:58:23 +08:00 committed by GitHub
commit 1965703d53
290 changed files with 24714 additions and 9992 deletions

View File

@ -0,0 +1,19 @@
# apr-util
ExternalProject_Add(aprutil-1
URL https://dlcdn.apache.org//apr/apr-util-1.6.3.tar.gz
URL_HASH SHA256=2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/apache/apr-util.git
#GIT_TAG 1.5.4
SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1 --without-expat
#CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)

View File

@ -0,0 +1,19 @@
# apr
ExternalProject_Add(apr-1
URL https://dlcdn.apache.org//apr/apr-1.7.4.tar.gz
URL_HASH SHA256=a4137dd82a185076fa50ba54232d920a17c6469c30b0876569e1c2a05ff311d9
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/apache/apr.git
#GIT_TAG 1.5.2
SOURCE_DIR "${TD_CONTRIB_DIR}/apr"
BUILD_IN_SOURCE TRUE
UPDATE_DISCONNECTED TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --enable-shared=no
#CONFIGURE_COMMAND ./configure
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)

View File

@ -1,5 +1,4 @@
cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE ON)
set(TD_BUILD_TAOSA_INTERNAL FALSE)
@ -78,6 +77,12 @@ ELSE ()
SET(TD_TAOS_TOOLS TRUE)
ENDIF ()
IF (${TD_WINDOWS})
SET(TAOS_LIB taos_static)
ELSE ()
SET(TAOS_LIB taos)
ENDIF ()
IF (TD_WINDOWS)
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")

View File

@ -125,6 +125,16 @@ option(
ON
)
IF(${TD_LINUX})
option(
BUILD_WITH_COS
"If build with cos"
ON
)
ENDIF ()
option(
BUILD_WITH_SQLITE
"If build with sqlite"

View File

@ -0,0 +1,12 @@
# cos
ExternalProject_Add(cos
GIT_REPOSITORY https://github.com/tencentyun/cos-c-sdk-v5.git
GIT_TAG v5.0.16
SOURCE_DIR "${TD_CONTRIB_DIR}/cos-c-sdk-v5"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)

View File

@ -0,0 +1,17 @@
# curl
ExternalProject_Add(curl
URL https://curl.se/download/curl-8.2.1.tar.gz
DOWNLOAD_NO_PROGRESS 1
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
#GIT_REPOSITORY https://github.com/curl/curl.git
#GIT_TAG curl-7_88_1
SOURCE_DIR "${TD_CONTRIB_DIR}/curl"
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd
#CONFIGURE_COMMAND ./configure --without-ssl
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)

View File

@ -0,0 +1,14 @@
# mxml
ExternalProject_Add(mxml
GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git
GIT_TAG release-2.10
SOURCE_DIR "${TD_CONTRIB_DIR}/mxml"
#BINARY_DIR ""
BUILD_IN_SOURCE TRUE
#UPDATE_COMMAND ""
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --enable-shared=no
#CONFIGURE_COMMAND ./configure
BUILD_COMMAND make
INSTALL_COMMAND make install
TEST_COMMAND ""
)

View File

@ -6,6 +6,39 @@ function(cat IN_FILE OUT_FILE)
file(APPEND ${OUT_FILE} "${CONTENTS}")
endfunction(cat IN_FILE OUT_FILE)
if(${TD_LINUX})
set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
if(${BUILD_WITH_COS})
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
endif(${BUILD_WITH_COS})
configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
if(${BUILD_WITH_COS})
cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
endif(${BUILD_WITH_COS})
configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
endif(${TD_LINUX})
set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@ -122,6 +155,16 @@ if(${BUILD_WITH_SQLITE})
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
endif(${BUILD_WITH_SQLITE})
# cos
if(${BUILD_WITH_COS})
#cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
#cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
add_definitions(-DUSE_COS)
endif(${BUILD_WITH_COS})
# lucene
if(${BUILD_WITH_LUCENE})
cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
@ -347,6 +390,31 @@ if (${BUILD_WITH_ROCKSDB})
endif()
endif()
# cos
if(${BUILD_WITH_COS})
if(${TD_LINUX})
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)
#ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
option(ENABLE_TEST "Enable the tests" OFF)
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
MESSAGE("$ENV{HOME}/.cos-local.1/include")
set(CMAKE_BUILD_TYPE debug)
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
set(CMAKE_PROJECT_NAME cos_c_sdk)
add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL)
target_include_directories(
cos_c_sdk
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cos-c-sdk-v5/cos_c_sdk>
)
set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
else()
endif(${TD_LINUX})
endif(${BUILD_WITH_COS})
# lucene
# To support build on ubuntu: sudo apt-get install libboost-all-dev
if(${BUILD_WITH_LUCENE})

View File

@ -3,6 +3,11 @@ if(${BUILD_WITH_ROCKSDB})
add_subdirectory(rocksdb)
endif(${BUILD_WITH_ROCKSDB})
# cos
if(${BUILD_WITH_COS})
add_subdirectory(cos)
endif(${BUILD_WITH_COS})
if(${BUILD_WITH_LUCENE})
add_subdirectory(lucene)
endif(${BUILD_WITH_LUCENE})

View File

@ -0,0 +1,49 @@
add_executable(cosTest "")
target_sources(cosTest
PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/main.c"
)
#find_path(APR_INCLUDE_DIR apr-1/apr_time.h)
#find_path(APR_UTIL_INCLUDE_DIR apr/include/apr-1/apr_md5.h)
#find_path(MINIXML_INCLUDE_DIR mxml.h)
#find_path(CURL_INCLUDE_DIR curl/curl.h)
#include_directories (${MINIXML_INCLUDE_DIR})
#include_directories (${CURL_INCLUDE_DIR})
FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
#FIND_PROGRAM(APU_CONFIG_BIN NAMES apu-config apu-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
IF (APR_CONFIG_BIN)
EXECUTE_PROCESS(
COMMAND ${APR_CONFIG_BIN} --includedir
OUTPUT_VARIABLE APR_INCLUDE_DIR
OUTPUT_STRIP_TRAILING_WHITESPACE
)
ENDIF()
#IF (APU_CONFIG_BIN)
# EXECUTE_PROCESS(
# COMMAND ${APU_CONFIG_BIN} --includedir
# OUTPUT_VARIABLE APR_UTIL_INCLUDE_DIR
# OUTPUT_STRIP_TRAILING_WHITESPACE
# )
#ENDIF()
include_directories (${APR_INCLUDE_DIR})
#include_directories (${APR_UTIL_INCLUDE_DIR})
target_include_directories(
cosTest
PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
)
#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
#find_library(MINIXML_LIBRARY mxml)
#find_library(CURL_LIBRARY curl)
target_link_libraries(cosTest cos_c_sdk)
target_link_libraries(cosTest apr-1)
target_link_libraries(cosTest aprutil-1)
target_link_libraries(cosTest mxml)
target_link_libraries(cosTest curl)

contrib/test/cos/main.c (new file, 3095 lines)

File diff suppressed because it is too large

View File

@ -35,11 +35,12 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
:::note
- All the data in `tag_set` will be converted to NCHAR type automatically .
- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double.
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h).
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
- All the data in `tag_set` will be converted to NCHAR type automatically
- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h)
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be created automatically. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored
- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false; otherwise, data will be written out of order and a database error will occur. (smlDataFormat in taos.cfg defaults to false since version 3.0.1.3; the smlDataFormat parameter is deprecated since 3.0.3.0)
:::
For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)

View File

@ -34,6 +34,7 @@ meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
## Examples
@ -68,11 +69,11 @@ Database changed.
taos> show stables;
name |
=================================
meters.current |
meters.voltage |
meters_current |
meters_voltage |
Query OK, 2 row(s) in set (0.002544s)
taos> select tbname, * from `meters.current`;
taos> select tbname, * from `meters_current`;
tbname | _ts | _value | groupid | location |
==================================================================================================================================
t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles |
@ -87,5 +88,5 @@ Query OK, 4 row(s) in set (0.005399s)
If you want to query the data where `location=California.LosAngeles groupid=3`, here is the query SQL:
```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3;
```

View File

@ -49,6 +49,7 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
- In JSON protocol, strings will be converted to NCHAR type and numeric values will be converted to double type.
- The child table name is created automatically in a rule to guarantee its uniqueness. But you can configure `smlChildTableName` in taos.cfg to specify a tag value as the table names if the tag value is unique globally. For example, if a tag is called `tname` and you set `smlChildTableName=tname` in taos.cfg, when you insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000`, the child table `cpu1` will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
:::
## Examples
@ -83,11 +84,11 @@ Database changed.
taos> show stables;
name |
=================================
meters.current |
meters.voltage |
meters_current |
meters_voltage |
Query OK, 2 row(s) in set (0.001954s)
taos> select * from `meters.current`;
taos> select * from `meters_current`;
_ts | _value | groupid | location |
===================================================================================================================
2022-03-28 09:56:51.249 | 10.300000000 | 2.000000000 | California.SanFrancisco |
@ -100,5 +101,5 @@ Query OK, 2 row(s) in set (0.004076s)
If you want to query the data where "tags": {"location": "California.LosAngeles", "groupid": 1}, here is the query SQL:
```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3;
```

View File

@ -52,7 +52,7 @@ CREATE TABLE d1004 USING meters TAGS ("California.LosAngeles", 3);
### Create a Stream
```sql
create stream current_stream into current_stream_output_stb as select _wstart as start, _wend as end, max(current) as max_current from meters where voltage <= 220 interval (5s);
create stream current_stream trigger at_once into current_stream_output_stb as select _wstart as wstart, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s);
```
### Write Data

View File

@ -23,7 +23,20 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
Tips: Data subscription is to consume data from the wal. If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products.
Tips (using the C interface as an example):
1. A consumer group consumes all data under the same topic, and different consumer groups are independent of each other;
2. A consumer group consumes all vgroups of the topic it subscribes to. A group can be composed of multiple consumers, but each vgroup is consumed by only one consumer; if the number of consumers exceeds the number of vgroups, the excess consumers do not consume any data;
3. On the server side, only one offset is saved per vgroup. The offsets of each vgroup are monotonically increasing but not necessarily continuous, and there is no correlation between the offsets of different vgroups;
4. On each poll, the server returns one result block, which belongs to a single vgroup and may contain data from multiple WAL versions. The offset of the first record in this block can be obtained through tmq_get_vgroup_offset;
5. If a consumer group has never committed an offset, its member consumers start consuming from the value of the auto.offset.reset parameter when they restart and pull data again. Within a consumer's lifecycle, the client locally records the offset of the most recent pull and will not pull duplicate data;
6. If a consumer terminates abnormally (without calling tmq_close), it takes about 12 seconds to trigger a rebalance of its consumer group; the consumer's status on the server changes to LOST, and after about 1 day the consumer is deleted automatically. If a consumer exits normally, it is deleted after exiting. When a new consumer is added, a rebalance is triggered after about 2 seconds, and the consumer's status on the server changes to ready;
7. A consumer group rebalance reassigns vgroups to all consumer members of the group that are in the ready state; consumers can only perform assignment/seek/commit/poll operations on the vgroups they are responsible for;
8. Consumers can call tmq_position to obtain the offset of the current consumption, seek to a specified offset, and consume again;
9. Seek moves the position to the specified offset without performing a commit. Once the seek succeeds, poll returns data starting from the specified offset;
10. Before seeking, tmq_get_topic_assignment must be called to obtain the consumer's vgroup IDs and offset ranges. The seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
11. Because of the WAL expiration and deletion mechanism, an offset may already have expired when data is polled, even if the seek succeeded. If the polled offset is smaller than the minimum WAL version number, consumption starts from that minimum version number;
12. tmq_get_vgroup_offset returns the offset of the first record in the result block that contains the record. Seeking to this offset consumes all the data in that block; see point 4 (and the sketch after this list);
13. Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. You therefore need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database, and make sure your application consumes the data in a timely way so that there is no data loss. This behavior is similar to Kafka and other widely used message queue products.
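The sketch below is a non-authoritative illustration of the seek workflow from tips 8-12, using only the C functions declared in the API listing that follows. The consumer handle `tmq` and the topic name `"topic_meters"` are assumed to exist already (the topic name is hypothetical), and error handling is abbreviated.

```c
// Hedged sketch: rewind an already-subscribed consumer to the beginning of the
// WAL range of every vgroup assigned to it. `tmq` and "topic_meters" are assumed.
#include <taos.h>
#include <stdio.h>
#include <inttypes.h>

static void rewind_to_begin(tmq_t *tmq, const char *topic) {
  tmq_topic_assignment *pAssign = NULL;
  int32_t               numOfAssign = 0;

  // Tip 10: fetch the vgroup IDs and offset ranges before seeking.
  int32_t code = tmq_get_topic_assignment(tmq, topic, &pAssign, &numOfAssign);
  if (code != 0) {
    printf("get assignment failed: %s\n", tmq_err2str(code));
    return;
  }

  for (int32_t i = 0; i < numOfAssign; ++i) {
    // Tips 8-9: seek moves the poll position without committing anything.
    code = tmq_offset_seek(tmq, topic, pAssign[i].vgId, pAssign[i].begin);
    if (code != 0) {
      printf("seek vgId %d failed: %s\n", pAssign[i].vgId, tmq_err2str(code));
      continue;
    }
    // tmq_position returns the next offset that will be consumed in this vgroup.
    printf("vgId %d position is now %" PRId64 "\n", pAssign[i].vgId,
           tmq_position(tmq, topic, pAssign[i].vgId));
  }

  tmq_free_assignment(pAssign);
}
```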
## Data Schema and API
@ -33,40 +46,59 @@ The related schemas and APIs in various languages are described as follows:
<TabItem value="c" label="C">
```c
typedef struct tmq_t tmq_t;
typedef struct tmq_conf_t tmq_conf_t;
typedef struct tmq_list_t tmq_list_t;
typedef struct tmq_t tmq_t;
typedef struct tmq_conf_t tmq_conf_t;
typedef struct tmq_list_t tmq_list_t;
typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param));
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
DLL_EXPORT tmq_list_t *tmq_list_new();
DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
DLL_EXPORT const char *tmq_err2str(int32_t code);
typedef enum tmq_conf_res_t {
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
} tmq_conf_res_t;
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end; // The last version of wal + 1
} tmq_topic_assignment;
enum tmq_conf_res_t {
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
};
typedef enum tmq_conf_res_t tmq_conf_res_t;
DLL_EXPORT tmq_conf_t *tmq_conf_new();
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
DLL_EXPORT tmq_conf_t *tmq_conf_new();
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
DLL_EXPORT tmq_list_t *tmq_list_new();
DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *);
DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *);
DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); //Commit the msgs offset + 1
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment);
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); // Get current offset of the result
DLL_EXPORT const char *tmq_err2str(int32_t code);
```
For more information, see [C/C++ Connector](/reference/connector/cpp).
The following example is based on the smart meter table described in Data Models. For complete sample code, see the C language section below.
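As a quick orientation before that full sample, here is a hedged, condensed consumer sketch built only from the functions listed above. The topic name `topic_meters` and the configuration values used (`group.id`, `td.connect.user`, `td.connect.pass`, `enable.auto.commit`) are illustrative assumptions, not taken from this document.

```c
// Condensed sketch of a consumer: configure, subscribe, poll, commit, close.
// Topic name and configuration values are placeholders.
#include <taos.h>
#include <stdio.h>

int main(void) {
  tmq_conf_t *conf = tmq_conf_new();
  tmq_conf_set(conf, "group.id", "demo_group");        // assumed consumer group id
  tmq_conf_set(conf, "td.connect.user", "root");       // assumed connection settings
  tmq_conf_set(conf, "td.connect.pass", "taosdata");
  tmq_conf_set(conf, "enable.auto.commit", "false");   // commit manually below

  char   errstr[256] = {0};
  tmq_t *tmq = tmq_consumer_new(conf, errstr, sizeof(errstr));
  tmq_conf_destroy(conf);
  if (tmq == NULL) {
    printf("create consumer failed: %s\n", errstr);
    return 1;
  }

  tmq_list_t *topics = tmq_list_new();
  tmq_list_append(topics, "topic_meters");             // assumed topic name
  int32_t code = tmq_subscribe(tmq, topics);
  tmq_list_destroy(topics);
  if (code != 0) {
    printf("subscribe failed: %s\n", tmq_err2str(code));
    return 1;
  }

  for (int i = 0; i < 10; ++i) {
    TAOS_RES *msg = tmq_consumer_poll(tmq, 1000);      // wait up to 1000 ms
    if (msg == NULL) continue;                         // nothing new this round
    printf("polled topic %s, vgId %d\n", tmq_get_topic_name(msg), tmq_get_vgroup_id(msg));
    tmq_commit_sync(tmq, msg);                         // commit this message's offset + 1
    taos_free_result(msg);
  }

  tmq_unsubscribe(tmq);
  tmq_consumer_close(tmq);
  return 0;
}
```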
</TabItem>

View File

@ -168,3 +168,11 @@ All [scalar functions](../function/#scalar-functions) are available in stream pr
- [unique](../function/#unique)
- [mode](../function/#mode)
## Pause/Resume stream
1. Pause stream
PAUSE STREAM [IF EXISTS] stream_name;
If "IF EXISTS" is not specified and the stream does not exist, an error is reported; if "IF EXISTS" is specified and the stream does not exist, success is returned; if the stream exists, all of its stream tasks are paused.
2. Resume stream
RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
If "IF EXISTS" is not specified and the stream does not exist, an error is reported; if "IF EXISTS" is specified and the stream does not exist, success is returned; if the stream exists, all of its stream tasks are resumed. If "IGNORE UNTREATED" is specified, data written while the stream was paused is ignored when the stream is resumed (a sketch of issuing these statements from an application follows).

View File

@ -4,7 +4,7 @@ sidebar_label: Access Control
description: This document describes how to manage users and permissions in TDengine.
---
This document describes how to manage permissions in TDengine.
User and access control is a distinguishing feature of TDengine Enterprise Edition. This section demonstrates only the most fundamental user and access control functionality; to learn about the full capabilities, please contact the TDengine team.
## Create a User

View File

@ -135,7 +135,7 @@ The following describes the basic API, synchronous API, asynchronous API, subscr
The base API is used to do things like create database connections and provide a runtime environment for the execution of other APIs.
- `void taos_init()`
- `int taos_init()`
Initializes the runtime environment. If the API is not actively called, the driver will automatically call the API when `taos_connect()` is called, so the program generally does not need to call it manually.
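Since `taos_init()` now returns an int, an application that initializes explicitly can check the result before connecting. The sketch below is illustrative only; the connection parameters are placeholders, and `taos_cleanup()` is assumed to be the matching teardown call from `taos.h`.

```c
// Hedged sketch: explicit initialization with the int return value checked.
#include <taos.h>
#include <stdio.h>

int main(void) {
  if (taos_init() != 0) {                 // 0 indicates the runtime initialized successfully
    printf("taos_init failed\n");
    return 1;
  }
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);  // placeholder parameters
  if (taos == NULL) {
    printf("failed to connect: %s\n", taos_errstr(NULL));
    return 1;
  }
  /* ... use the connection ... */
  taos_close(taos);
  taos_cleanup();                         // assumed counterpart of taos_init()
  return 0;
}
```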
@ -378,7 +378,7 @@ In addition to writing data using the SQL method or the parameter binding API, w
- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)`
**Function description**
This interface writes the text data of the line protocol to TDengine.
- This interface writes the text data of the line protocol to TDengine.
**Parameter description**
- taos: database connection, established by the `taos_connect()` function.
@ -387,12 +387,13 @@ In addition to writing data using the SQL method or the parameter binding API, w
- protocol: the protocol type of the lines, used to identify the text data format.
- precision: precision string for the timestamp in the text data.
**return value**
TAOS_RES structure, application can get error message by using `taos_errstr()` and also error code by using `taos_errno()`.
**Return value**
- TAOS_RES structure, application can get error message by using `taos_errstr()` and also error code by using `taos_errno()`.
In some cases, the returned TAOS_RES is `NULL`, and it is still possible to call `taos_errno()` to safely get the error code information.
The returned TAOS_RES needs to be freed by the caller in order to avoid memory leaks.
**Description**
The protocol type is enumerated and contains the following three formats.
- TSDB_SML_LINE_PROTOCOL: InfluxDB line protocol (Line Protocol)
@ -427,3 +428,90 @@ In addition to writing data using the SQL method or the parameter binding API, w
- The _raw interfaces pass the data through the lines pointer and the length len, which solves the problem that data containing '\0' is truncated by the original interface. The totalRows pointer returns the number of parsed data rows.
- The _ttl interfaces accept a ttl parameter that controls the TTL expiration time of the created table.
- The _reqid interfaces can track the entire call chain by passing the reqid parameter.
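A minimal sketch of the base `taos_schemaless_insert()` call described above, using line-protocol rows in the style of the meters example shown earlier. The nanosecond precision constant `TSDB_SML_TIMESTAMP_NANO_SECONDS` is assumed to be defined in `taos.h` alongside `TSDB_SML_LINE_PROTOCOL`.

```c
// Hedged sketch of taos_schemaless_insert(); the connection `taos` is assumed to exist.
#include <taos.h>
#include <stdio.h>

static void insert_lines(TAOS *taos) {
  char *lines[] = {
      "meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0.29 1626006833639000000",
      "meters,location=California.LosAngeles,groupid=2 current=12.6,voltage=221,phase=0.31 1626006834639000000",
  };
  TAOS_RES *res = taos_schemaless_insert(taos, lines, 2,
                                         TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_NANO_SECONDS);  // assumed precision constant
  // The returned TAOS_RES may be non-NULL even on failure and must be freed by the caller.
  if (taos_errno(res) != 0) {
    printf("schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
}
```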
### Subscription API
- `int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment)`
- `void tmq_free_assignment(tmq_topic_assignment* pAssignment)`
tmq_topic_assignment is defined as follows:
```c
typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
} tmq_topic_assignment;
```
**Function description**
- tmq_get_topic_assignment gets the current vgroup information of this consumer
**Parameter description**
- numOfAssignment: the number of vgroups assigned to this consumer
- assignment: the information of the vgroups; it needs to be freed by tmq_free_assignment
**Return value**
- Zero on success, nonzero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
- `int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
**Function description**
- Gets the committed offset
**Return value**
- The committed offset; -2147467247 means no value has been committed, and other values less than 0 indicate failure
- `int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg)`
- `void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param)`
- `int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
- `void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param)`
**Function description**
- The commit interface is divided into two types, each with synchronous and asynchronous interfaces:
- The first type commits based on a message and submits the progress carried in the message; if the message is NULL, the current progress of all vgroups consumed by this consumer is committed: tmq_commit_sync/tmq_commit_async
- The second type commits based on the offset of a vgroup in a topic: tmq_commit_offset_sync/tmq_commit_offset_async
**Parameter description**
- msg: the consumed message; if NULL is passed, the current progress of all vgroups consumed by this consumer is committed
**Return value**
- Zero on success, nonzero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
- `int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
**Function description**
- Obtains the current consumption position, which is the position right after the data already consumed
**Return value**
- The current consumption position; a value less than 0 indicates failure, and the error message can be obtained through `char *tmq_err2str(int32_t code)`
- `int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
**Function description**
- Sets the offset of the consumer in a vgroup of a certain topic, from which consumption starts
**Return value**
- Zero on success, nonzero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
- `int64_t tmq_get_vgroup_offset(TAOS_RES* res)`
**Function description**
- Obtains the starting offset of the consumed data
**Parameter description**
- msg: the consumed message
**Return value**
- The starting offset of the consumed data; a value less than 0 indicates failure, and the error message can be obtained through `char *tmq_err2str(int32_t code)`
- `int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics)`
**Function description**
- Obtains the list of topics subscribed by the consumer
**Parameter description**
- topics: the list of topics subscribed by the consumer; it needs to be freed by tmq_list_destroy
**Return value**
- Zero on success, nonzero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
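The following sketch combines several of the calls documented above: it reads the committed offset and the current position of every vgroup assigned to a consumer and then commits the current position explicitly. The consumer handle and topic name are assumptions, and error handling is kept minimal.

```c
// Hedged sketch: inspect and commit per-vgroup progress. `tmq` and the topic name are assumed.
#include <taos.h>
#include <stdio.h>
#include <inttypes.h>

static void report_and_commit(tmq_t *tmq, const char *topic) {
  tmq_topic_assignment *pAssign = NULL;
  int32_t               numOfAssign = 0;
  if (tmq_get_topic_assignment(tmq, topic, &pAssign, &numOfAssign) != 0) return;

  for (int32_t i = 0; i < numOfAssign; ++i) {
    int64_t committed = tmq_committed(tmq, topic, pAssign[i].vgId);  // -2147467247 if never committed
    int64_t position  = tmq_position(tmq, topic, pAssign[i].vgId);   // next offset to be consumed
    printf("vgId %d: committed %" PRId64 ", position %" PRId64 "\n",
           pAssign[i].vgId, committed, position);

    // Commit the current position of this vgroup explicitly.
    int32_t code = tmq_commit_offset_sync(tmq, topic, pAssign[i].vgId, position);
    if (code != 0) printf("commit failed: %s\n", tmq_err2str(code));
  }
  tmq_free_assignment(pAssign);
}
```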

View File

@ -1091,8 +1091,6 @@ public abstract class ConsumerLoop {
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
@ -1176,7 +1174,6 @@ public abstract class ConsumerLoop {
config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
config.setProperty("value.deserializer.encoding", "UTF-8");
config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");

View File

@ -81,6 +81,14 @@ For example:
taos -h h1.taos.com -s "use db; show tables;"
```
## Export query results to a file
- You can use ">>" to export the query results to a file, the syntax is like `select * from table >> file`. If there is only file name without path, the file will be generated under the current working directory of TDegnine CLI.
## Import data from CSV file
- You can use `insert into table_name file 'fileName'` to import the data from the specified file into the specified table. For example, `insert into d0 file '/root/d0.csv';` imports the data in file "/root/d0.csv" into table "d0". If only a file name without a path is given, the file is expected to be in the current working directory of the TDengine CLI.
## TDengine CLI tips
- You can use the up and down keys to iterate the history of commands entered
@ -89,3 +97,5 @@ taos -h h1.taos.com -s "use db; show tables;"
- Execute `RESET QUERY CACHE` to clear the local cache of the table schema
- Execute SQL statements in batches. You can store a series of shell commands (ending with ;, one line for each SQL command) in a script file and execute the command `source <file-name>` in the TDengine CLI to execute all SQL commands in that file automatically
- Enter `q` to exit TDengine CLI

View File

@ -32,8 +32,8 @@ All data in tag_set is automatically converted to the NCHAR data type and does n
In the schemaless writing data line protocol, each data item in the field_set needs to be described with its data type. Let's explain in detail:
- If there are English double quotes on both sides, it indicates the BINARY(32) type. For example, `"abc"`.
- If there are double quotes on both sides and an L prefix, it means NCHAR(32) type. For example, `L"error message"`.
- If there are English double quotes on both sides, it indicates the VARCHAR(N) type. For example, `"abc"`.
- If there are double quotes on both sides and an L prefix, it means NCHAR(N) type. For example, `L"error message"`.
- Spaces, equals sign (=), comma (,), double quote ("), and backslash (\\) need to be escaped with a backslash (\\) in front. (All refer to the ASCII character). The rules are as follows:
| **Serial number** | **Element** | **Escape characters** |

View File

@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://t
import Release from "/components/ReleaseV3";
## 3.1.0.2
<Release type="tdengine" version="3.1.0.2" />
## 3.1.0.0
:::note IMPORTANT

View File

@ -227,12 +227,6 @@ tmq_t* build_consumer() {
return NULL;
}
code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
if (TMQ_CONF_OK != code) {
tmq_conf_destroy(conf);
return NULL;
}
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);

View File

@ -35,7 +35,6 @@ func main() {
"td.connect.port": "6030",
"client.id": "test_tmq_client",
"enable.auto.commit": "false",
"experimental.snapshot.enable": "true",
"msg.with.table.name": "true",
})
if err != nil {

View File

@ -34,11 +34,12 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
:::note
- All data in tag_set is automatically converted to the NCHAR data type;
- Each data item in field_set needs to describe its own data type; for example, 1.2f32 denotes a FLOAT value of 1.2, and without the type suffix it is treated as DOUBLE;
- Multiple timestamp precisions are supported; the precision is specified by a parameter when writing data, and 6 precisions from hours to nanoseconds are supported
- All data in tag_set is automatically converted to the NCHAR data type
- Each data item in field_set needs to describe its own data type; for example, 1.2f32 denotes a FLOAT value of 1.2, and without the type suffix it is treated as DOUBLE
- Multiple timestamp precisions are supported; the precision is specified by a parameter when writing data, and 6 precisions from hours to nanoseconds are supported
- To improve write efficiency, it is assumed by default that the order of field_set within the same supertable is consistent (the first record contains all fields, and subsequent records follow the same order); if the order is not consistent, set the parameter smlDataFormat to false, otherwise data is written assuming the same order and the data in the database will be abnormal (smlDataFormat defaults to false since version 3.0.1.3 and is deprecated since 3.0.3.0) [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
- By default, the generated child table name is a unique ID produced by rules. You can also configure the smlChildTableName parameter in taos.cfg on the client side to use a tag value as the child table name; that tag value should be globally unique. For example, if a tag is named tname and smlChildTableName=tname is configured, then inserting st,tname=cpu1,t1=4 c1=3 1626006833639000000 creates the child table cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row used for automatic table creation takes effect and the other rows are ignored. [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)
:::
For more information, see the [InfluxDB Line Protocol documentation](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and the [TDengine Schemaless Writing Reference](/reference/schemaless/#无模式写入行协议)

View File

@ -67,11 +67,11 @@ Database changed.
taos> SHOW STABLES;
name |
=================================
meters.current |
meters.voltage |
meters_current |
meters_voltage |
Query OK, 2 row(s) in set (0.002544s)
taos> SELECT TBNAME, * FROM `meters.current`;
taos> SELECT TBNAME, * FROM `meters_current`;
tbname | _ts | _value | groupid | location |
==================================================================================================================================
t_0e7bcfa21a02331c06764f275... | 2022-03-28 09:56:51.249 | 10.800000000 | 3 | California.LosAngeles |
@ -83,10 +83,10 @@ Query OK, 4 row(s) in set (0.005399s)
## SQL Query Examples
`meters.current` is the supertable name under which the data was inserted.
`meters_current` is the supertable name under which the data was inserted.
You can filter data by the supertable's tags; for example, the data where `location=California.LosAngeles groupid=3` can be queried with the following SQL:
```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3;
```

View File

@ -48,6 +48,7 @@ OpenTSDB JSON 格式协议采用一个 JSON 字符串表示一行或多行数据
- For the JSON protocol, TDengine does not automatically convert all tags to the NCHAR type; strings are converted to the NCHAR type, and numeric values are likewise converted to the DOUBLE type.
- By default, the generated child table name is a unique ID produced by rules. You can also configure the smlChildTableName parameter in taos.cfg on the client side to use a tag value as the child table name; that tag value should be globally unique. For example, if a tag is named tname and smlChildTableName=tname is configured, then inserting `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` creates the child table cpu1. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row used for automatic table creation takes effect and the other rows are ignored
:::
## Sample Code
@ -82,8 +83,8 @@ Database changed.
taos> SHOW STABLES;
name |
=================================
meters.current |
meters.voltage |
meters_current |
meters_voltage |
Query OK, 2 row(s) in set (0.001954s)
taos> SELECT * FROM `meters.current`;
@ -96,10 +97,10 @@ Query OK, 2 row(s) in set (0.004076s)
## SQL Query Examples
`meters.voltage` is the supertable name under which the data was inserted.
`meters_voltage` is the supertable name under which the data was inserted.
You can filter data by the supertable's tags; for example, the data where `location=California.LosAngeles groupid=1` can be queried with the following SQL:
```sql
SELECT * FROM `meters.current` WHERE location = "California.LosAngeles" AND groupid = 3;
SELECT * FROM `meters_current` WHERE location = "California.LosAngeles" AND groupid = 3;
```

View File

@ -25,7 +25,20 @@ import CDemo from "./_sub_c.mdx";
This document does not introduce the basics of message queues themselves; please look them up separately if needed.
Note: Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the data in those deleted files can no longer be consumed. You therefore need to set a reasonable value for `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database according to your business needs, and make sure your application consumes the data in a timely way so that no data is lost. Data subscription behaves similarly to Kafka and other widely used message queue products.
Notes (using the C interface as an example):
1. A consumer group consumes all data under the same topic, and different consumer groups are independent of each other;
2. A consumer group consumes all vgroups of the topic it subscribes to. A group can be composed of multiple consumers, but each vgroup is consumed by only one consumer; if the number of consumers exceeds the number of vgroups, the excess consumers do not consume any data;
3. On the server side, only one offset is saved per vgroup. The offsets of each vgroup are monotonically increasing but not necessarily continuous, and there is no correlation between the offsets of different vgroups;
4. On each poll, the server returns one result block, which belongs to a single vgroup and may contain data from multiple WAL versions. The offset of the first record in this block can be obtained through the tmq_get_vgroup_offset interface;
5. If a consumer group has never committed an offset, its member consumers start consuming from the value of the auto.offset.reset parameter when they restart and pull data again. Within a consumer's lifecycle, the client locally records the offset of the most recent pull and will not pull duplicate data;
6. If a consumer terminates abnormally (without calling tmq_close), it takes about 12 seconds to trigger a rebalance of its consumer group; the consumer's status on the server changes to LOST, and after about 1 day the consumer is deleted automatically. If a consumer exits normally, it is deleted after exiting. When a new consumer is added, a rebalance is triggered after about 2 seconds, and the consumer's status on the server changes to ready;
7. A consumer group rebalance reassigns vgroups to all consumer members of the group that are in the ready state; consumers can only perform assignment/seek/commit/poll operations on the vgroups they are responsible for;
8. Consumers can call tmq_position to obtain the offset of the current consumption, seek to a specified offset, and consume again;
9. Seek moves the position to the specified offset without performing a commit. Once the seek succeeds, poll returns data starting from the specified offset;
10. Before seeking, the tmq_get_topic_assignment interface must be called to obtain the consumer's vgroup IDs and offset ranges. The seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
11. Because of the WAL expiration and deletion mechanism, an offset may already have expired when data is polled, even if the seek succeeded. If the polled offset is smaller than the minimum WAL version number, consumption starts from that minimum version number;
12. The tmq_get_vgroup_offset interface returns the offset of the first record in the result block that contains the record. Seeking to this offset consumes all the data in that block; see point 4;
13. Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the data in those deleted files can no longer be consumed. You therefore need to set a reasonable value for `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database according to your business needs, and make sure your application consumes the data in a timely way so that no data is lost. Data subscription behaves similarly to Kafka and other widely used message queue products;
## 主要数据结构和 API
@ -35,39 +48,60 @@ import CDemo from "./_sub_c.mdx";
<TabItem value="c" label="C">
```c
typedef struct tmq_t tmq_t;
typedef struct tmq_conf_t tmq_conf_t;
typedef struct tmq_list_t tmq_list_t;
typedef struct tmq_t tmq_t;
typedef struct tmq_conf_t tmq_conf_t;
typedef struct tmq_list_t tmq_list_t;
typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param));
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
DLL_EXPORT tmq_list_t *tmq_list_new();
DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
DLL_EXPORT const char *tmq_err2str(int32_t code);
typedef enum tmq_conf_res_t {
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
} tmq_conf_res_t;
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
} tmq_topic_assignment;
enum tmq_conf_res_t {
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
};
typedef enum tmq_conf_res_t tmq_conf_res_t;
DLL_EXPORT tmq_conf_t *tmq_conf_new();
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
DLL_EXPORT tmq_conf_t *tmq_conf_new();
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
DLL_EXPORT tmq_list_t *tmq_list_new();
DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *);
DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *);
DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment);
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
DLL_EXPORT const char *tmq_err2str(int32_t code);
DLL_EXPORT void        tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
```
For documentation of these APIs, see the [C/C++ Connector](../../connector/cpp). The following describes their specific usage (for the supertable and subtable structure, see the Data Modeling section); for complete sample code, see the C language example below.
The following describes their specific usage (for the supertable and subtable structure, see the Data Modeling section); for complete sample code, see the C language example below.
</TabItem>
<TabItem value="java" label="Java">

View File

@ -223,7 +223,7 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
The base API is used for tasks such as creating database connections and provides the runtime environment for executing the other APIs.
- `void taos_init()`
- `int taos_init()`
Initializes the runtime environment. If this API is not called explicitly, the driver calls it automatically when `taos_connect()` is called, so programs generally do not need to call it manually.
@ -467,21 +467,22 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)`
**Function description**
This interface writes line-protocol text data into TDengine.
- This interface writes line-protocol text data into TDengine.
**Parameter description**
taos: the database connection, established through the `taos_connect()` function.
lines: the text data, a schemaless text string that meets the parsing format requirements.
numLines: the number of lines of text data; it cannot be 0.
protocol: the line protocol type, used to identify the text data format.
precision: the timestamp precision string in the text data.
- taos: the database connection, established through the `taos_connect()` function.
- lines: the text data, a schemaless text string that meets the parsing format requirements.
- numLines: the number of lines of text data; it cannot be 0.
- protocol: the line protocol type, used to identify the text data format.
- precision: the timestamp precision string in the text data.
**Return value**
A TAOS_RES structure; the application can obtain the error message through `taos_errstr()` and the error code through `taos_errno()`.
- A TAOS_RES structure; the application can obtain the error message through `taos_errstr()` and the error code through `taos_errno()`.
In some cases the returned TAOS_RES is `NULL`; it is still safe to call `taos_errno()` to obtain the error code.
The returned TAOS_RES must be freed by the caller, otherwise a memory leak occurs.
**Description**
The protocol type is an enumeration containing the following three formats:
- TSDB_SML_LINE_PROTOCOL: InfluxDB Line Protocol
@ -515,3 +516,90 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
- The _raw interfaces pass the data through the lines pointer and the length len, which solves the problem that data containing '\0' is truncated by the original interface. The totalRows pointer returns the number of parsed data rows.
- The _ttl interfaces can pass a ttl parameter to control the TTL expiration time of the created table.
- The _reqid interfaces can track the entire call chain by passing a reqid parameter.
### Data Subscription API
- `int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment)`
- `void tmq_free_assignment(tmq_topic_assignment* pAssignment)`
The tmq_topic_assignment structure is defined as follows:
```c
typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
int64_t begin;
int64_t end;
} tmq_topic_assignment;
```
**Function description**
- tmq_get_topic_assignment returns the information of the vgroups currently assigned to this consumer; each vgroup's information includes the vgId, the maximum and minimum WAL offsets, and the offset currently consumed.
**Parameter description**
- numOfAssignment: the number of valid vgroups assigned to this consumer.
- assignment: the assignment information, an array of numOfAssignment entries, which must be freed through the tmq_free_assignment interface.
**Return value**
- Error code: 0 on success, nonzero on failure; the error message can be obtained through the `char *tmq_err2str(int32_t code)` function.
- `int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
**Function description**
- Gets the committed position of the current consumer on a given topic and vgroup.
**Return value**
- The currently committed position; -2147467247 means there is no consumption progress, and other values less than 0 indicate failure, with the return value being the error code
- `int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg)`
- `void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param)`
- `int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
- `void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param)`
**Function description**
- The commit interfaces are divided into two types, each with a synchronous and an asynchronous version:
- The first type commits based on a message and submits the progress carried in the message; if the message is NULL, the current progress of all vgroups consumed by this consumer is committed: tmq_commit_sync/tmq_commit_async
- The second type commits based on the offset of a specific vgroup in a specific topic: tmq_commit_offset_sync/tmq_commit_offset_async
**Parameter description**
- msg: the consumed message; if msg is NULL, the current progress of all vgroups consumed by this consumer is committed
**Return value**
- Error code: 0 on success, nonzero on failure; the error message can be obtained through the `char *tmq_err2str(int32_t code)` function
- `int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
**Function description**
- Gets the current consumption position, which is the position right after the data already consumed
**Return value**
- The consumption position; a value less than 0 indicates failure, and the error message can be obtained through the `char *tmq_err2str(int32_t code)` function
- `int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
**Function description**
- Sets the offset of the consumer in a vgroup of a certain topic, from which consumption starts
**Return value**
- Error code: 0 on success, nonzero on failure; the error message can be obtained through the `char *tmq_err2str(int32_t code)` function
- `int64_t tmq_get_vgroup_offset(TAOS_RES* res)`
**Function description**
- Gets the starting offset of the data consumed by poll
**Parameter description**
- msg: the consumed message
**Return value**
- The consumed offset; a value less than 0 indicates failure, and the error message can be obtained through the `char *tmq_err2str(int32_t code)` function
- `int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics)`
**Function description**
- Gets the list of topics subscribed by the consumer
**Parameter description**
- topics: the retrieved topic list is stored in this structure; the memory is allocated inside the interface and must be freed through tmq_list_destroy
**Return value**
- Error code: 0 on success, nonzero on failure; the error message can be obtained through the `char *tmq_err2str(int32_t code)` function

View File

@ -249,3 +249,12 @@ T = 最新事件时间 - DELETE_MARK
- [unique](../function/#unique)
- [mode](../function/#mode)
## Pausing and Resuming Stream Processing
1. Pause a stream processing task
PAUSE STREAM [IF EXISTS] stream_name;
If IF EXISTS is not specified and the stream does not exist, an error is reported; if the stream exists, stream processing is paused. If IF EXISTS is specified and the stream does not exist, success is returned; if the stream exists, stream processing is paused.
2. Resume a stream processing task
RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
If IF EXISTS is not specified and the stream does not exist, an error is reported; if the stream exists, stream processing is resumed. If IF EXISTS is specified and the stream does not exist, success is returned; if the stream exists, stream processing is resumed. If IGNORE UNTREATED is specified, data written while the stream was paused is ignored when the stream is resumed.

View File

@ -4,7 +4,7 @@ title: 权限管理
description: Permission management features available only in the Enterprise Edition
---
This section describes how to perform permission management operations in TDengine.
This section describes how to perform permission management operations in TDengine. Permission management is a feature specific to TDengine Enterprise Edition; this section lists only some basic permission management functionality as examples. For richer permission management, please contact the TDengine sales or marketing team.
## Create a User

View File

@ -89,3 +89,11 @@ taos -h h1.taos.com -s "use db; show tables;"
- Execute `RESET QUERY CACHE` to clear the local cache of table schemas
- Execute SQL statements in batches. You can store a series of TDengine CLI commands (each SQL statement on its own line, ending with a semicolon) in a file and run the command `source <file-name>` in the TDengine CLI to automatically execute all SQL statements in that file
- Enter `q`, `quit`, or `exit` and press Enter to exit the TDengine CLI
## Exporting Query Results to a File from the TDengine CLI
- You can use the ">>" symbol to export query results to a file; the syntax is: SQL query statement >> 'output file name'. If no path is given for the output file, it is written to the current directory. For example, select * from d0 >> '/root/d0.csv'; writes the query results to /root/d0.csv.
## Importing Data from a File into a Table with the TDengine CLI
- You can use insert into table_name file 'input file name' to import the data file exported in the previous step back into the specified table. For example, insert into d0 file '/root/d0.csv'; imports all the data exported above back into table d0.

View File

@ -33,8 +33,8 @@ tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要
In the schemaless writing line protocol, each data item in field_set needs to describe its own data type. Specifically:
- If enclosed in double quotes, it indicates the BINARY(32) type. For example `"abc"`
- If enclosed in double quotes with an L prefix, it indicates the NCHAR(32) type. For example `L"报错信息"`
- If enclosed in double quotes, it indicates the VARCHAR(N) type. For example `"abc"`
- If enclosed in double quotes with an L prefix, it indicates the NCHAR(N) type. For example `L"报错信息"`
- Spaces, equals signs (=), commas (,), double quotes ("), and backslashes (\) must be escaped with a preceding backslash (\). (All refer to ASCII half-width characters.) The specific escape rules are as follows:
| **No.** | **Element** | **Characters to escape** |

View File

@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
## 3.1.0.2
<Release type="tdengine" version="3.1.0.2" />
## 3.1.0.0
<Release type="tdengine" version="3.1.0.0" />

View File

@ -32,12 +32,12 @@ static void queryDB(TAOS *taos, char *command) {
taos_free_result(pSql);
pSql = NULL;
}
pSql = taos_query(taos, command);
code = taos_errno(pSql);
if (0 == code) {
break;
}
}
}
if (code != 0) {
@ -63,7 +63,7 @@ int main(int argc, char *argv[]) {
TAOS *taos = taos_connect(argv[1], "root", "taosdata", NULL, 0);
if (taos == NULL) {
printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/);
printf("failed to connect to server, reason:%s\n", taos_errstr(NULL));
exit(1);
}
for (int i = 0; i < 100; i++) {
@ -86,14 +86,14 @@ void Test(TAOS *taos, char *qstr, int index) {
for (i = 0; i < 10; ++i) {
sprintf(qstr, "insert into m1 values (%" PRId64 ", %d, %d, %d, %d, %f, %lf, '%s')", (uint64_t)(1546300800000 + i * 1000), i, i, i, i*10000000, i*1.0, i*2.0, "hello");
printf("qstr: %s\n", qstr);
// note: how do you wanna do if taos_query returns non-NULL
// if (taos_query(taos, qstr)) {
// printf("insert row: %i, reason:%s\n", i, taos_errstr(taos));
// }
TAOS_RES *result1 = taos_query(taos, qstr);
if (result1 == NULL || taos_errno(result1) != 0) {
printf("failed to insert row, reason:%s\n", taos_errstr(result1));
printf("failed to insert row, reason:%s\n", taos_errstr(result1));
taos_free_result(result1);
exit(1);
} else {
@ -107,7 +107,7 @@ void Test(TAOS *taos, char *qstr, int index) {
sprintf(qstr, "SELECT * FROM m1");
result = taos_query(taos, qstr);
if (result == NULL || taos_errno(result) != 0) {
printf("failed to select, reason:%s\n", taos_errstr(result));
printf("failed to select, reason:%s\n", taos_errstr(result));
taos_free_result(result);
exit(1);
}

View File

@ -228,11 +228,6 @@ tmq_t* build_consumer() {
tmq_conf_destroy(conf);
return NULL;
}
code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
if (TMQ_CONF_OK != code) {
tmq_conf_destroy(conf);
return NULL;
}
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq = tmq_consumer_new(conf, NULL, 0);

View File

@ -260,19 +260,14 @@ typedef struct tmq_t tmq_t;
typedef struct tmq_conf_t tmq_conf_t;
typedef struct tmq_list_t tmq_list_t;
typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param));
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
DLL_EXPORT tmq_list_t *tmq_list_new();
DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *);
DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *);
typedef enum tmq_conf_res_t {
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
} tmq_conf_res_t;
DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
DLL_EXPORT const char *tmq_err2str(int32_t code);
/* ------------------------TMQ CONSUMER INTERFACE------------------------ */
typedef struct tmq_topic_assignment {
int32_t vgId;
int64_t currentOffset;
@ -280,43 +275,38 @@ typedef struct tmq_topic_assignment {
int64_t end;
} tmq_topic_assignment;
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,
int32_t *numOfAssignment);
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */
enum tmq_conf_res_t {
TMQ_CONF_UNKNOWN = -2,
TMQ_CONF_INVALID = -1,
TMQ_CONF_OK = 0,
};
typedef enum tmq_conf_res_t tmq_conf_res_t;
DLL_EXPORT tmq_conf_t *tmq_conf_new();
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
/* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */
DLL_EXPORT tmq_list_t *tmq_list_new();
DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *);
DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *);
DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); //Commit the msgs offset + 1
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param);
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment);
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
DLL_EXPORT const char *tmq_err2str(int32_t code);
/* ------------------------------ TAOSX -----------------------------------*/
// note: following apis are unstable
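The data-subscription interface in this hunk is easiest to read as a consume loop. A minimal sketch using only functions declared in this header; the group id, topic name and 5000 ms poll timeout are illustrative assumptions, and error handling is reduced to tmq_err2str:

#include <stdio.h>
#include <stdbool.h>
#include "taos.h"

// Sketch of a TMQ consume loop; not the canonical example shipped with TDengine.
static void consume_sketch(volatile bool *running) {
  tmq_conf_t *conf = tmq_conf_new();
  tmq_conf_set(conf, "group.id", "grp1");             // assumed consumer group
  tmq_conf_set(conf, "enable.auto.commit", "false");
  tmq_t *tmq = tmq_consumer_new(conf, NULL, 0);
  tmq_conf_destroy(conf);

  tmq_list_t *topics = tmq_list_new();
  tmq_list_append(topics, "topic1");                  // assumed topic name
  tmq_subscribe(tmq, topics);
  tmq_list_destroy(topics);

  while (*running) {
    TAOS_RES *msg = tmq_consumer_poll(tmq, 5000);     // NULL when the timeout expires
    if (msg == NULL) continue;
    printf("data from topic %s, vgId %d\n", tmq_get_topic_name(msg), tmq_get_vgroup_id(msg));
    int32_t code = tmq_commit_sync(tmq, msg);         // commits the msg's offset + 1
    if (code != 0) printf("commit failed: %s\n", tmq_err2str(code));
    taos_free_result(msg);
  }
  tmq_unsubscribe(tmq);
  tmq_consumer_close(tmq);
}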

View File

@ -152,6 +152,8 @@ enum {
STREAM_INPUT__DATA_RETRIEVE,
STREAM_INPUT__GET_RES,
STREAM_INPUT__CHECKPOINT,
STREAM_INPUT__CHECKPOINT_TRIGGER,
STREAM_INPUT__TRANS_STATE,
STREAM_INPUT__REF_DATA_BLOCK,
STREAM_INPUT__DESTROY,
};
@ -168,7 +170,9 @@ typedef enum EStreamType {
STREAM_PULL_DATA,
STREAM_PULL_OVER,
STREAM_FILL_OVER,
STREAM_CHECKPOINT,
STREAM_CREATE_CHILD_TABLE,
STREAM_TRANS_STATE,
} EStreamType;
#pragma pack(push, 1)

View File

@ -179,6 +179,8 @@ int32_t getJsonValueLen(const char* data);
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData);
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, bool trimValue);
int32_t colDataCopyNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
uint32_t numOfRows, bool isNull);
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
const SColumnInfoData* pSource, int32_t numOfRow2);
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows,
@ -241,7 +243,7 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols);
const char* blockDecode(SSDataBlock* pBlock, const char* pData);
// for debug
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf, const char* taskIdStr);
int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pDataBlocks, const STSchema* pTSchema, int64_t uid, int32_t vgId,
tb_uid_t suid);

View File

@ -199,6 +199,7 @@ extern bool tsFilterScalarMode;
extern int32_t tsKeepTimeOffset;
extern int32_t tsMaxStreamBackendCache;
extern int32_t tsPQSortMemThreshold;
extern int32_t tsResolveFQDNRetryTime;
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)

View File

@ -77,7 +77,8 @@ static inline bool tmsgIsValid(tmsg_t type) {
}
static inline bool vnodeIsMsgBlock(tmsg_t type) {
return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) ||
(type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM) || (type == TDMT_VND_COMMIT);
(type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM) || (type == TDMT_VND_COMMIT) ||
(type == TDMT_SYNC_CONFIG_CHANGE);
}
static inline bool syncUtilUserCommit(tmsg_t msgType) {
@ -212,6 +213,215 @@ typedef enum _mgmt_table {
#define TD_REQ_FROM_APP 0
#define TD_REQ_FROM_TAOX 1
typedef enum ENodeType {
// Syntax nodes are used in parser and planner module, and some are also used in executor module, such as COLUMN,
// VALUE, OPERATOR, FUNCTION and so on.
QUERY_NODE_COLUMN = 1,
QUERY_NODE_VALUE,
QUERY_NODE_OPERATOR,
QUERY_NODE_LOGIC_CONDITION,
QUERY_NODE_FUNCTION,
QUERY_NODE_REAL_TABLE,
QUERY_NODE_TEMP_TABLE,
QUERY_NODE_JOIN_TABLE,
QUERY_NODE_GROUPING_SET,
QUERY_NODE_ORDER_BY_EXPR,
QUERY_NODE_LIMIT,
QUERY_NODE_STATE_WINDOW,
QUERY_NODE_SESSION_WINDOW,
QUERY_NODE_INTERVAL_WINDOW,
QUERY_NODE_NODE_LIST,
QUERY_NODE_FILL,
QUERY_NODE_RAW_EXPR, // Only used in the parser module.
QUERY_NODE_TARGET,
QUERY_NODE_DATABLOCK_DESC,
QUERY_NODE_SLOT_DESC,
QUERY_NODE_COLUMN_DEF,
QUERY_NODE_DOWNSTREAM_SOURCE,
QUERY_NODE_DATABASE_OPTIONS,
QUERY_NODE_TABLE_OPTIONS,
QUERY_NODE_INDEX_OPTIONS,
QUERY_NODE_EXPLAIN_OPTIONS,
QUERY_NODE_STREAM_OPTIONS,
QUERY_NODE_LEFT_VALUE,
QUERY_NODE_COLUMN_REF,
QUERY_NODE_WHEN_THEN,
QUERY_NODE_CASE_WHEN,
QUERY_NODE_EVENT_WINDOW,
QUERY_NODE_HINT,
// Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR = 100,
QUERY_NODE_SELECT_STMT,
QUERY_NODE_VNODE_MODIFY_STMT,
QUERY_NODE_CREATE_DATABASE_STMT,
QUERY_NODE_DROP_DATABASE_STMT,
QUERY_NODE_ALTER_DATABASE_STMT,
QUERY_NODE_FLUSH_DATABASE_STMT,
QUERY_NODE_TRIM_DATABASE_STMT,
QUERY_NODE_CREATE_TABLE_STMT,
QUERY_NODE_CREATE_SUBTABLE_CLAUSE,
QUERY_NODE_CREATE_MULTI_TABLES_STMT,
QUERY_NODE_DROP_TABLE_CLAUSE,
QUERY_NODE_DROP_TABLE_STMT,
QUERY_NODE_DROP_SUPER_TABLE_STMT,
QUERY_NODE_ALTER_TABLE_STMT,
QUERY_NODE_ALTER_SUPER_TABLE_STMT,
QUERY_NODE_CREATE_USER_STMT,
QUERY_NODE_ALTER_USER_STMT,
QUERY_NODE_DROP_USER_STMT,
QUERY_NODE_USE_DATABASE_STMT,
QUERY_NODE_CREATE_DNODE_STMT,
QUERY_NODE_DROP_DNODE_STMT,
QUERY_NODE_ALTER_DNODE_STMT,
QUERY_NODE_CREATE_INDEX_STMT,
QUERY_NODE_DROP_INDEX_STMT,
QUERY_NODE_CREATE_QNODE_STMT,
QUERY_NODE_DROP_QNODE_STMT,
QUERY_NODE_CREATE_BNODE_STMT,
QUERY_NODE_DROP_BNODE_STMT,
QUERY_NODE_CREATE_SNODE_STMT,
QUERY_NODE_DROP_SNODE_STMT,
QUERY_NODE_CREATE_MNODE_STMT,
QUERY_NODE_DROP_MNODE_STMT,
QUERY_NODE_CREATE_TOPIC_STMT,
QUERY_NODE_DROP_TOPIC_STMT,
QUERY_NODE_DROP_CGROUP_STMT,
QUERY_NODE_ALTER_LOCAL_STMT,
QUERY_NODE_EXPLAIN_STMT,
QUERY_NODE_DESCRIBE_STMT,
QUERY_NODE_RESET_QUERY_CACHE_STMT,
QUERY_NODE_COMPACT_DATABASE_STMT,
QUERY_NODE_CREATE_FUNCTION_STMT,
QUERY_NODE_DROP_FUNCTION_STMT,
QUERY_NODE_CREATE_STREAM_STMT,
QUERY_NODE_DROP_STREAM_STMT,
QUERY_NODE_BALANCE_VGROUP_STMT,
QUERY_NODE_MERGE_VGROUP_STMT,
QUERY_NODE_REDISTRIBUTE_VGROUP_STMT,
QUERY_NODE_SPLIT_VGROUP_STMT,
QUERY_NODE_SYNCDB_STMT,
QUERY_NODE_GRANT_STMT,
QUERY_NODE_REVOKE_STMT,
QUERY_NODE_SHOW_DNODES_STMT,
QUERY_NODE_SHOW_MNODES_STMT,
QUERY_NODE_SHOW_MODULES_STMT,
QUERY_NODE_SHOW_QNODES_STMT,
QUERY_NODE_SHOW_SNODES_STMT,
QUERY_NODE_SHOW_BNODES_STMT,
QUERY_NODE_SHOW_CLUSTER_STMT,
QUERY_NODE_SHOW_DATABASES_STMT,
QUERY_NODE_SHOW_FUNCTIONS_STMT,
QUERY_NODE_SHOW_INDEXES_STMT,
QUERY_NODE_SHOW_STABLES_STMT,
QUERY_NODE_SHOW_STREAMS_STMT,
QUERY_NODE_SHOW_TABLES_STMT,
QUERY_NODE_SHOW_TAGS_STMT,
QUERY_NODE_SHOW_USERS_STMT,
QUERY_NODE_SHOW_LICENCES_STMT,
QUERY_NODE_SHOW_VGROUPS_STMT,
QUERY_NODE_SHOW_TOPICS_STMT,
QUERY_NODE_SHOW_CONSUMERS_STMT,
QUERY_NODE_SHOW_CONNECTIONS_STMT,
QUERY_NODE_SHOW_QUERIES_STMT,
QUERY_NODE_SHOW_APPS_STMT,
QUERY_NODE_SHOW_VARIABLES_STMT,
QUERY_NODE_SHOW_DNODE_VARIABLES_STMT,
QUERY_NODE_SHOW_TRANSACTIONS_STMT,
QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
QUERY_NODE_SHOW_VNODES_STMT,
QUERY_NODE_SHOW_USER_PRIVILEGES_STMT,
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
QUERY_NODE_SHOW_CREATE_STABLE_STMT,
QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
QUERY_NODE_SHOW_SCORES_STMT,
QUERY_NODE_SHOW_TABLE_TAGS_STMT,
QUERY_NODE_KILL_CONNECTION_STMT,
QUERY_NODE_KILL_QUERY_STMT,
QUERY_NODE_KILL_TRANSACTION_STMT,
QUERY_NODE_DELETE_STMT,
QUERY_NODE_INSERT_STMT,
QUERY_NODE_QUERY,
QUERY_NODE_SHOW_DB_ALIVE_STMT,
QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT,
QUERY_NODE_BALANCE_VGROUP_LEADER_STMT,
QUERY_NODE_RESTORE_DNODE_STMT,
QUERY_NODE_RESTORE_QNODE_STMT,
QUERY_NODE_RESTORE_MNODE_STMT,
QUERY_NODE_RESTORE_VNODE_STMT,
QUERY_NODE_PAUSE_STREAM_STMT,
QUERY_NODE_RESUME_STREAM_STMT,
// logic plan node
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
QUERY_NODE_LOGIC_PLAN_JOIN,
QUERY_NODE_LOGIC_PLAN_AGG,
QUERY_NODE_LOGIC_PLAN_PROJECT,
QUERY_NODE_LOGIC_PLAN_VNODE_MODIFY,
QUERY_NODE_LOGIC_PLAN_EXCHANGE,
QUERY_NODE_LOGIC_PLAN_MERGE,
QUERY_NODE_LOGIC_PLAN_WINDOW,
QUERY_NODE_LOGIC_PLAN_FILL,
QUERY_NODE_LOGIC_PLAN_SORT,
QUERY_NODE_LOGIC_PLAN_PARTITION,
QUERY_NODE_LOGIC_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_LOGIC_PLAN_INTERP_FUNC,
QUERY_NODE_LOGIC_SUBPLAN,
QUERY_NODE_LOGIC_PLAN,
QUERY_NODE_LOGIC_PLAN_GROUP_CACHE,
QUERY_NODE_LOGIC_PLAN_DYN_QUERY_CTRL,
// physical plan node
QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN = 1100,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN,
QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN,
QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN,
QUERY_NODE_PHYSICAL_PLAN_PROJECT,
QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN,
QUERY_NODE_PHYSICAL_PLAN_HASH_AGG,
QUERY_NODE_PHYSICAL_PLAN_EXCHANGE,
QUERY_NODE_PHYSICAL_PLAN_MERGE,
QUERY_NODE_PHYSICAL_PLAN_SORT,
QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT,
QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_FILL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION,
QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE,
QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE,
QUERY_NODE_PHYSICAL_PLAN_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC,
QUERY_NODE_PHYSICAL_PLAN_DISPATCH,
QUERY_NODE_PHYSICAL_PLAN_INSERT,
QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT,
QUERY_NODE_PHYSICAL_PLAN_DELETE,
QUERY_NODE_PHYSICAL_SUBPLAN,
QUERY_NODE_PHYSICAL_PLAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN,
QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT,
QUERY_NODE_PHYSICAL_PLAN_HASH_JOIN,
QUERY_NODE_PHYSICAL_PLAN_GROUP_CACHE,
QUERY_NODE_PHYSICAL_PLAN_DYN_QUERY_CTRL
} ENodeType;
typedef struct {
int32_t vgId;
char* dbFName;
@ -231,6 +441,7 @@ typedef struct SField {
uint8_t type;
int8_t flags;
int32_t bytes;
char comment[TSDB_COL_COMMENT_LEN];
} SField;
typedef struct SRetention {
@ -309,6 +520,7 @@ struct SSchema {
col_id_t colId;
int32_t bytes;
char name[TSDB_COL_NAME_LEN];
char comment[TSDB_COL_COMMENT_LEN];
};
struct SSchema2 {
@ -741,6 +953,10 @@ typedef struct STimeWindow {
TSKEY ekey;
} STimeWindow;
typedef struct SQueryHint {
bool batchScan;
} SQueryHint;
typedef struct {
int32_t tsOffset; // offset value in current msg body, NOTE: ts list is compressed
int32_t tsLen; // total length of ts comp block
@ -759,12 +975,18 @@ typedef struct {
int64_t offset;
} SInterval;
typedef struct {
int32_t code;
typedef struct STbVerInfo {
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t sversion;
int32_t tversion;
} STbVerInfo;
typedef struct {
int32_t code;
int64_t affectedRows;
SArray* tbVerInfo; // STbVerInfo
} SQueryTableRsp;
int32_t tSerializeSQueryTableRsp(void* buf, int32_t bufLen, SQueryTableRsp* pRsp);
@ -1163,6 +1385,9 @@ typedef struct {
int32_t vgId;
int8_t syncState;
int8_t syncRestore;
int64_t syncTerm;
int64_t roleTimeMs;
int64_t startTimeMs;
int8_t syncCanRead;
int64_t cacheUsage;
int64_t numOfTables;
@ -1176,12 +1401,13 @@ typedef struct {
int64_t numOfBatchInsertReqs;
int64_t numOfBatchInsertSuccessReqs;
int32_t numOfCachedTables;
int32_t learnerProgress; // uses one reserved field
} SVnodeLoad;
typedef struct {
int8_t syncState;
int8_t syncRestore;
int8_t syncState;
int64_t syncTerm;
int8_t syncRestore;
int64_t roleTimeMs;
} SMnodeLoad;
@ -1317,6 +1543,7 @@ typedef struct {
int8_t learnerReplica;
int8_t learnerSelfIndex;
SReplica learnerReplicas[TSDB_MAX_LEARNER_REPLICA];
int32_t changeVersion;
} SCreateVnodeReq;
int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq);
@ -1391,7 +1618,8 @@ typedef struct {
int8_t learnerSelfIndex;
int8_t learnerReplica;
SReplica learnerReplicas[TSDB_MAX_LEARNER_REPLICA];
} SAlterVnodeReplicaReq, SAlterVnodeTypeReq;
int32_t changeVersion;
} SAlterVnodeReplicaReq, SAlterVnodeTypeReq, SCheckLearnCatchupReq;
int32_t tSerializeSAlterVnodeReplicaReq(void* buf, int32_t bufLen, SAlterVnodeReplicaReq* pReq);
int32_t tDeserializeSAlterVnodeReplicaReq(void* buf, int32_t bufLen, SAlterVnodeReplicaReq* pReq);
@ -1409,7 +1637,8 @@ typedef struct {
int32_t dstVgId;
uint32_t hashBegin;
uint32_t hashEnd;
int64_t reserved;
int32_t changeVersion;
int32_t reserved;
} SAlterVnodeHashRangeReq;
int32_t tSerializeSAlterVnodeHashRangeReq(void* buf, int32_t bufLen, SAlterVnodeHashRangeReq* pReq);
@ -1831,12 +2060,26 @@ typedef struct {
int32_t tversion;
} SResReadyRsp;
typedef struct SOperatorParam {
int32_t opType;
int32_t downstreamIdx;
void* value;
SArray* pChildren; //SArray<SOperatorParam*>
} SOperatorParam;
typedef struct STableScanOperatorParam {
bool tableSeq;
SArray* pUidList;
} STableScanOperatorParam;
typedef struct {
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t taskId;
int32_t execId;
SMsgHead header;
uint64_t sId;
uint64_t queryId;
uint64_t taskId;
int32_t execId;
SOperatorParam* pOpParam;
} SResFetchReq;
int32_t tSerializeSResFetchReq(void* buf, int32_t bufLen, SResFetchReq* pReq);
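A hedged sketch of how the new SOperatorParam envelope carried by SResFetchReq might be populated for a sequential table scan; taosMemoryCalloc/taosArrayInit are the usual TDengine helpers, and encoding the operator kind as a physical-plan node type in opType is an assumption made for illustration:

// Sketch only: build a table-scan operator parameter and wrap it for a fetch request.
STableScanOperatorParam *pScan = taosMemoryCalloc(1, sizeof(*pScan));
pScan->tableSeq = true;                                       // scan the uids one by one
pScan->pUidList = taosArrayInit(4, sizeof(tb_uid_t));         // uids filled in by the caller

SOperatorParam *pParam = taosMemoryCalloc(1, sizeof(*pParam));
pParam->opType        = QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN;  // assumed opType encoding
pParam->downstreamIdx = 0;
pParam->value         = pScan;
pParam->pChildren     = NULL;                                 // no nested operator params
// pParam can now be attached to SResFetchReq.pOpParam before serialization.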
@ -2387,6 +2630,9 @@ typedef struct {
int8_t type;
int8_t flags;
int32_t bytes;
bool hasColComment;
char* colComment;
int32_t colCommentLen;
// TSDB_ALTER_TABLE_DROP_COLUMN
// TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES
int8_t colModType;

View File

@ -85,6 +85,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_DND_MAX_MSG, "dnd-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_ALTER_MNODE_TYPE, "dnode-alter-mnode-type", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_ALTER_VNODE_TYPE, "dnode-alter-vnode-type", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP, "dnode-check-vnode-learner-catchup", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_MND_MSG)
TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL)
@ -254,7 +255,6 @@ enum {
TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_SCAN_HISTORY, "stream-scan-history", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_SCAN_HISTORY_FINISH, "stream-scan-history-finish", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TRANSFER_STATE, "stream-transfer-state", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECK, "stream-task-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECKPOINT, "stream-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_REPORT_CHECKPOINT, "stream-report-checkpoint", NULL, NULL)

View File

@ -130,25 +130,25 @@
#define TK_NK_EQ 112
#define TK_USING 113
#define TK_TAGS 114
#define TK_BOOL 115
#define TK_TINYINT 116
#define TK_SMALLINT 117
#define TK_INT 118
#define TK_INTEGER 119
#define TK_BIGINT 120
#define TK_FLOAT 121
#define TK_DOUBLE 122
#define TK_BINARY 123
#define TK_NCHAR 124
#define TK_UNSIGNED 125
#define TK_JSON 126
#define TK_VARCHAR 127
#define TK_MEDIUMBLOB 128
#define TK_BLOB 129
#define TK_VARBINARY 130
#define TK_GEOMETRY 131
#define TK_DECIMAL 132
#define TK_COMMENT 133
#define TK_COMMENT 115
#define TK_BOOL 116
#define TK_TINYINT 117
#define TK_SMALLINT 118
#define TK_INT 119
#define TK_INTEGER 120
#define TK_BIGINT 121
#define TK_FLOAT 122
#define TK_DOUBLE 123
#define TK_BINARY 124
#define TK_NCHAR 125
#define TK_UNSIGNED 126
#define TK_JSON 127
#define TK_VARCHAR 128
#define TK_MEDIUMBLOB 129
#define TK_BLOB 130
#define TK_VARBINARY 131
#define TK_GEOMETRY 132
#define TK_DECIMAL 133
#define TK_MAX_DELAY 134
#define TK_WATERMARK 135
#define TK_ROLLUP 136
@ -276,91 +276,98 @@
#define TK_JOIN 258
#define TK_INNER 259
#define TK_SELECT 260
#define TK_DISTINCT 261
#define TK_WHERE 262
#define TK_PARTITION 263
#define TK_BY 264
#define TK_SESSION 265
#define TK_STATE_WINDOW 266
#define TK_EVENT_WINDOW 267
#define TK_SLIDING 268
#define TK_FILL 269
#define TK_VALUE 270
#define TK_VALUE_F 271
#define TK_NONE 272
#define TK_PREV 273
#define TK_NULL_F 274
#define TK_LINEAR 275
#define TK_NEXT 276
#define TK_HAVING 277
#define TK_RANGE 278
#define TK_EVERY 279
#define TK_ORDER 280
#define TK_SLIMIT 281
#define TK_SOFFSET 282
#define TK_LIMIT 283
#define TK_OFFSET 284
#define TK_ASC 285
#define TK_NULLS 286
#define TK_ABORT 287
#define TK_AFTER 288
#define TK_ATTACH 289
#define TK_BEFORE 290
#define TK_BEGIN 291
#define TK_BITAND 292
#define TK_BITNOT 293
#define TK_BITOR 294
#define TK_BLOCKS 295
#define TK_CHANGE 296
#define TK_COMMA 297
#define TK_CONCAT 298
#define TK_CONFLICT 299
#define TK_COPY 300
#define TK_DEFERRED 301
#define TK_DELIMITERS 302
#define TK_DETACH 303
#define TK_DIVIDE 304
#define TK_DOT 305
#define TK_EACH 306
#define TK_FAIL 307
#define TK_FILE 308
#define TK_FOR 309
#define TK_GLOB 310
#define TK_ID 311
#define TK_IMMEDIATE 312
#define TK_IMPORT 313
#define TK_INITIALLY 314
#define TK_INSTEAD 315
#define TK_ISNULL 316
#define TK_KEY 317
#define TK_MODULES 318
#define TK_NK_BITNOT 319
#define TK_NK_SEMI 320
#define TK_NOTNULL 321
#define TK_OF 322
#define TK_PLUS 323
#define TK_PRIVILEGE 324
#define TK_RAISE 325
#define TK_RESTRICT 326
#define TK_ROW 327
#define TK_SEMI 328
#define TK_STAR 329
#define TK_STATEMENT 330
#define TK_STRICT 331
#define TK_STRING 332
#define TK_TIMES 333
#define TK_VALUES 334
#define TK_VARIABLE 335
#define TK_VIEW 336
#define TK_WAL 337
#define TK_NK_HINT 261
#define TK_DISTINCT 262
#define TK_WHERE 263
#define TK_PARTITION 264
#define TK_BY 265
#define TK_SESSION 266
#define TK_STATE_WINDOW 267
#define TK_EVENT_WINDOW 268
#define TK_SLIDING 269
#define TK_FILL 270
#define TK_VALUE 271
#define TK_VALUE_F 272
#define TK_NONE 273
#define TK_PREV 274
#define TK_NULL_F 275
#define TK_LINEAR 276
#define TK_NEXT 277
#define TK_HAVING 278
#define TK_RANGE 279
#define TK_EVERY 280
#define TK_ORDER 281
#define TK_SLIMIT 282
#define TK_SOFFSET 283
#define TK_LIMIT 284
#define TK_OFFSET 285
#define TK_ASC 286
#define TK_NULLS 287
#define TK_ABORT 288
#define TK_AFTER 289
#define TK_ATTACH 290
#define TK_BEFORE 291
#define TK_BEGIN 292
#define TK_BITAND 293
#define TK_BITNOT 294
#define TK_BITOR 295
#define TK_BLOCKS 296
#define TK_CHANGE 297
#define TK_COMMA 298
#define TK_CONCAT 299
#define TK_CONFLICT 300
#define TK_COPY 301
#define TK_DEFERRED 302
#define TK_DELIMITERS 303
#define TK_DETACH 304
#define TK_DIVIDE 305
#define TK_DOT 306
#define TK_EACH 307
#define TK_FAIL 308
#define TK_FILE 309
#define TK_FOR 310
#define TK_GLOB 311
#define TK_ID 312
#define TK_IMMEDIATE 313
#define TK_IMPORT 314
#define TK_INITIALLY 315
#define TK_INSTEAD 316
#define TK_ISNULL 317
#define TK_KEY 318
#define TK_MODULES 319
#define TK_NK_BITNOT 320
#define TK_NK_SEMI 321
#define TK_NOTNULL 322
#define TK_OF 323
#define TK_PLUS 324
#define TK_PRIVILEGE 325
#define TK_RAISE 326
#define TK_RESTRICT 327
#define TK_ROW 328
#define TK_SEMI 329
#define TK_STAR 330
#define TK_STATEMENT 331
#define TK_STRICT 332
#define TK_STRING 333
#define TK_TIMES 334
#define TK_VALUES 335
#define TK_VARIABLE 336
#define TK_VIEW 337
#define TK_WAL 338
#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601
#define TK_NK_ILLEGAL 602
#define TK_NK_HEX 603 // hex number 0x123
#define TK_NK_OCT 604 // oct number
#define TK_NK_BIN 605 // bin format data 0b111
#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601
#define TK_NK_ILLEGAL 602
#define TK_NK_HEX 603 // hex number 0x123
#define TK_NK_OCT 604 // oct number
#define TK_NK_BIN 605 // bin format data 0b111
#define TK_BATCH_SCAN 606
#define TK_NO_BATCH_SCAN 607
#define TK_NK_NIL 65535

View File

@ -97,6 +97,8 @@ int32_t dsPutDataBlock(DataSinkHandle handle, const SInputData* pInput, bool* pC
void dsEndPut(DataSinkHandle handle, uint64_t useconds);
void dsReset(DataSinkHandle handle);
/**
* Get the length of the data returned by the next call to dsGetDataBlock.
* @param handle

View File

@ -130,6 +130,10 @@ int32_t qSetSMAInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks,
*/
int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd);
bool qIsDynamicExecTask(qTaskInfo_t tinfo);
void qUpdateOperatorParam(qTaskInfo_t tinfo, void* pParam);
/**
* Create the exec task object according to task json
* @param readHandle
@ -150,7 +154,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
* @return
*/
int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion,
int32_t* tversion);
int32_t* tversion, int32_t idx);
/**
* The main task execution function, including query on both table and multiple tables,

View File

@ -98,6 +98,18 @@ typedef struct SMTbCursor {
int8_t paused;
} SMTbCursor;
typedef struct SMCtbCursor {
SMeta *pMeta;
void *pCur;
tb_uid_t suid;
void *pKey;
void *pVal;
int kLen;
int vLen;
int8_t paused;
int lock;
} SMCtbCursor;
typedef struct SRowBuffPos {
void* pRowBuff;
void* pKey;
@ -278,13 +290,17 @@ typedef struct SStoreMeta {
void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables,
int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) &
// metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta);
int64_t (*getNumOfRowsInMem)(void* pVnode);
/**
int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list);
int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg);
int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list);
*/
SMCtbCursor* (*openCtbCursor)(void *pVnode, tb_uid_t uid, int lock);
int32_t (*resumeCtbCursor)(SMCtbCursor* pCtbCur, int8_t first);
void (*pauseCtbCursor)(SMCtbCursor* pCtbCur);
void (*closeCtbCursor)(SMCtbCursor *pCtbCur);
tb_uid_t (*ctbCursorNext)(SMCtbCursor* pCur);
} SStoreMeta;
typedef struct SStoreMetaReader {
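A hedged sketch of how the SMCtbCursor callbacks added to SStoreMeta above fit together when walking the child tables of a super table; pMeta, pVnode and suid come from the caller, and both the lock=1 value and the 0-terminated uid convention are assumptions rather than guarantees of this header:

// Sketch only: iterate child-table uids of one super table through the vnode meta store.
SMCtbCursor *pCur = pMeta->openCtbCursor(pVnode, suid, 1);   // 1: assumed read lock
tb_uid_t     uid  = 0;
while ((uid = pMeta->ctbCursorNext(pCur)) != 0) {            // 0 assumed to mean exhausted
  // process one child-table uid, e.g. append it to an SArray
}
pMeta->closeCtbCursor(pCur);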

View File

@ -122,6 +122,8 @@ typedef enum EFunctionType {
FUNCTION_TYPE_IROWTS,
FUNCTION_TYPE_ISFILLED,
FUNCTION_TYPE_TAGS,
FUNCTION_TYPE_TBUID,
FUNCTION_TYPE_VGID,
// internal function
FUNCTION_TYPE_SELECT_VALUE = 3750,
@ -157,6 +159,8 @@ typedef enum EFunctionType {
FUNCTION_TYPE_AVG_MERGE,
FUNCTION_TYPE_STDDEV_PARTIAL,
FUNCTION_TYPE_STDDEV_MERGE,
FUNCTION_TYPE_IRATE_PARTIAL,
FUNCTION_TYPE_IRATE_MERGE,
// geometry functions
FUNCTION_TYPE_GEOM_FROM_TEXT = 4250,

View File

@ -23,10 +23,11 @@ extern "C" {
#include "query.h"
#include "querynodes.h"
#define DESCRIBE_RESULT_COLS 4
#define DESCRIBE_RESULT_FIELD_LEN (TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_TYPE_LEN (20 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_NOTE_LEN (8 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_COLS 5
#define DESCRIBE_RESULT_FIELD_LEN (TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_TYPE_LEN (20 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_NOTE_LEN (8 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_COL_COMMENT_LEN (TSDB_COL_COMMENT_LEN)
#define SHOW_CREATE_DB_RESULT_COLS 2
#define SHOW_CREATE_DB_RESULT_FIELD1_LEN (TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE)
@ -155,7 +156,7 @@ typedef struct SColumnDefNode {
ENodeType type;
char colName[TSDB_COL_NAME_LEN];
SDataType dataType;
char comments[TSDB_TB_COMMENT_LEN];
char comments[TSDB_COL_COMMENT_LEN];
bool sma;
} SColumnDefNode;
@ -214,6 +215,7 @@ typedef struct SAlterTableStmt {
char newColName[TSDB_COL_NAME_LEN];
STableOptions* pOptions;
SDataType dataType;
char colComment[TSDB_COL_COMMENT_LEN];
SValueNode* pVal;
} SAlterTableStmt;

View File

@ -21,6 +21,7 @@ extern "C" {
#endif
#include "tdef.h"
#include "tmsg.h"
#define nodeType(nodeptr) (((const SNode*)(nodeptr))->type)
#define setNodeType(nodeptr, nodetype) (((SNode*)(nodeptr))->type = (nodetype))
@ -78,208 +79,6 @@ extern "C" {
(list) = NULL; \
} while (0)
typedef enum ENodeType {
// Syntax nodes are used in parser and planner module, and some are also used in executor module, such as COLUMN,
// VALUE, OPERATOR, FUNCTION and so on.
QUERY_NODE_COLUMN = 1,
QUERY_NODE_VALUE,
QUERY_NODE_OPERATOR,
QUERY_NODE_LOGIC_CONDITION,
QUERY_NODE_FUNCTION,
QUERY_NODE_REAL_TABLE,
QUERY_NODE_TEMP_TABLE,
QUERY_NODE_JOIN_TABLE,
QUERY_NODE_GROUPING_SET,
QUERY_NODE_ORDER_BY_EXPR,
QUERY_NODE_LIMIT,
QUERY_NODE_STATE_WINDOW,
QUERY_NODE_SESSION_WINDOW,
QUERY_NODE_INTERVAL_WINDOW,
QUERY_NODE_NODE_LIST,
QUERY_NODE_FILL,
QUERY_NODE_RAW_EXPR, // Only used in the parser module.
QUERY_NODE_TARGET,
QUERY_NODE_DATABLOCK_DESC,
QUERY_NODE_SLOT_DESC,
QUERY_NODE_COLUMN_DEF,
QUERY_NODE_DOWNSTREAM_SOURCE,
QUERY_NODE_DATABASE_OPTIONS,
QUERY_NODE_TABLE_OPTIONS,
QUERY_NODE_INDEX_OPTIONS,
QUERY_NODE_EXPLAIN_OPTIONS,
QUERY_NODE_STREAM_OPTIONS,
QUERY_NODE_LEFT_VALUE,
QUERY_NODE_COLUMN_REF,
QUERY_NODE_WHEN_THEN,
QUERY_NODE_CASE_WHEN,
QUERY_NODE_EVENT_WINDOW,
// Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR = 100,
QUERY_NODE_SELECT_STMT,
QUERY_NODE_VNODE_MODIFY_STMT,
QUERY_NODE_CREATE_DATABASE_STMT,
QUERY_NODE_DROP_DATABASE_STMT,
QUERY_NODE_ALTER_DATABASE_STMT,
QUERY_NODE_FLUSH_DATABASE_STMT,
QUERY_NODE_TRIM_DATABASE_STMT,
QUERY_NODE_CREATE_TABLE_STMT,
QUERY_NODE_CREATE_SUBTABLE_CLAUSE,
QUERY_NODE_CREATE_MULTI_TABLES_STMT,
QUERY_NODE_DROP_TABLE_CLAUSE,
QUERY_NODE_DROP_TABLE_STMT,
QUERY_NODE_DROP_SUPER_TABLE_STMT,
QUERY_NODE_ALTER_TABLE_STMT,
QUERY_NODE_ALTER_SUPER_TABLE_STMT,
QUERY_NODE_CREATE_USER_STMT,
QUERY_NODE_ALTER_USER_STMT,
QUERY_NODE_DROP_USER_STMT,
QUERY_NODE_USE_DATABASE_STMT,
QUERY_NODE_CREATE_DNODE_STMT,
QUERY_NODE_DROP_DNODE_STMT,
QUERY_NODE_ALTER_DNODE_STMT,
QUERY_NODE_CREATE_INDEX_STMT,
QUERY_NODE_DROP_INDEX_STMT,
QUERY_NODE_CREATE_QNODE_STMT,
QUERY_NODE_DROP_QNODE_STMT,
QUERY_NODE_CREATE_BNODE_STMT,
QUERY_NODE_DROP_BNODE_STMT,
QUERY_NODE_CREATE_SNODE_STMT,
QUERY_NODE_DROP_SNODE_STMT,
QUERY_NODE_CREATE_MNODE_STMT,
QUERY_NODE_DROP_MNODE_STMT,
QUERY_NODE_CREATE_TOPIC_STMT,
QUERY_NODE_DROP_TOPIC_STMT,
QUERY_NODE_DROP_CGROUP_STMT,
QUERY_NODE_ALTER_LOCAL_STMT,
QUERY_NODE_EXPLAIN_STMT,
QUERY_NODE_DESCRIBE_STMT,
QUERY_NODE_RESET_QUERY_CACHE_STMT,
QUERY_NODE_COMPACT_DATABASE_STMT,
QUERY_NODE_CREATE_FUNCTION_STMT,
QUERY_NODE_DROP_FUNCTION_STMT,
QUERY_NODE_CREATE_STREAM_STMT,
QUERY_NODE_DROP_STREAM_STMT,
QUERY_NODE_BALANCE_VGROUP_STMT,
QUERY_NODE_MERGE_VGROUP_STMT,
QUERY_NODE_REDISTRIBUTE_VGROUP_STMT,
QUERY_NODE_SPLIT_VGROUP_STMT,
QUERY_NODE_SYNCDB_STMT,
QUERY_NODE_GRANT_STMT,
QUERY_NODE_REVOKE_STMT,
QUERY_NODE_SHOW_DNODES_STMT,
QUERY_NODE_SHOW_MNODES_STMT,
QUERY_NODE_SHOW_MODULES_STMT,
QUERY_NODE_SHOW_QNODES_STMT,
QUERY_NODE_SHOW_SNODES_STMT,
QUERY_NODE_SHOW_BNODES_STMT,
QUERY_NODE_SHOW_CLUSTER_STMT,
QUERY_NODE_SHOW_DATABASES_STMT,
QUERY_NODE_SHOW_FUNCTIONS_STMT,
QUERY_NODE_SHOW_INDEXES_STMT,
QUERY_NODE_SHOW_STABLES_STMT,
QUERY_NODE_SHOW_STREAMS_STMT,
QUERY_NODE_SHOW_TABLES_STMT,
QUERY_NODE_SHOW_TAGS_STMT,
QUERY_NODE_SHOW_USERS_STMT,
QUERY_NODE_SHOW_LICENCES_STMT,
QUERY_NODE_SHOW_VGROUPS_STMT,
QUERY_NODE_SHOW_TOPICS_STMT,
QUERY_NODE_SHOW_CONSUMERS_STMT,
QUERY_NODE_SHOW_CONNECTIONS_STMT,
QUERY_NODE_SHOW_QUERIES_STMT,
QUERY_NODE_SHOW_APPS_STMT,
QUERY_NODE_SHOW_VARIABLES_STMT,
QUERY_NODE_SHOW_DNODE_VARIABLES_STMT,
QUERY_NODE_SHOW_TRANSACTIONS_STMT,
QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
QUERY_NODE_SHOW_VNODES_STMT,
QUERY_NODE_SHOW_USER_PRIVILEGES_STMT,
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
QUERY_NODE_SHOW_CREATE_STABLE_STMT,
QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
QUERY_NODE_SHOW_SCORES_STMT,
QUERY_NODE_SHOW_TABLE_TAGS_STMT,
QUERY_NODE_KILL_CONNECTION_STMT,
QUERY_NODE_KILL_QUERY_STMT,
QUERY_NODE_KILL_TRANSACTION_STMT,
QUERY_NODE_DELETE_STMT,
QUERY_NODE_INSERT_STMT,
QUERY_NODE_QUERY,
QUERY_NODE_SHOW_DB_ALIVE_STMT,
QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT,
QUERY_NODE_BALANCE_VGROUP_LEADER_STMT,
QUERY_NODE_RESTORE_DNODE_STMT,
QUERY_NODE_RESTORE_QNODE_STMT,
QUERY_NODE_RESTORE_MNODE_STMT,
QUERY_NODE_RESTORE_VNODE_STMT,
QUERY_NODE_PAUSE_STREAM_STMT,
QUERY_NODE_RESUME_STREAM_STMT,
// logic plan node
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
QUERY_NODE_LOGIC_PLAN_JOIN,
QUERY_NODE_LOGIC_PLAN_AGG,
QUERY_NODE_LOGIC_PLAN_PROJECT,
QUERY_NODE_LOGIC_PLAN_VNODE_MODIFY,
QUERY_NODE_LOGIC_PLAN_EXCHANGE,
QUERY_NODE_LOGIC_PLAN_MERGE,
QUERY_NODE_LOGIC_PLAN_WINDOW,
QUERY_NODE_LOGIC_PLAN_FILL,
QUERY_NODE_LOGIC_PLAN_SORT,
QUERY_NODE_LOGIC_PLAN_PARTITION,
QUERY_NODE_LOGIC_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_LOGIC_PLAN_INTERP_FUNC,
QUERY_NODE_LOGIC_SUBPLAN,
QUERY_NODE_LOGIC_PLAN,
// physical plan node
QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN = 1100,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN,
QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN,
QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN,
QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN,
QUERY_NODE_PHYSICAL_PLAN_PROJECT,
QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN,
QUERY_NODE_PHYSICAL_PLAN_HASH_AGG,
QUERY_NODE_PHYSICAL_PLAN_EXCHANGE,
QUERY_NODE_PHYSICAL_PLAN_MERGE,
QUERY_NODE_PHYSICAL_PLAN_SORT,
QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT,
QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_FILL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION,
QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE,
QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE,
QUERY_NODE_PHYSICAL_PLAN_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC,
QUERY_NODE_PHYSICAL_PLAN_DISPATCH,
QUERY_NODE_PHYSICAL_PLAN_INSERT,
QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT,
QUERY_NODE_PHYSICAL_PLAN_DELETE,
QUERY_NODE_PHYSICAL_SUBPLAN,
QUERY_NODE_PHYSICAL_PLAN,
QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN,
QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT,
QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT
} ENodeType;
/**
* The first field of a node of any type is guaranteed to be the ENodeType.
* Hence the type of any node can be gotten by casting it to SNode.

View File

@ -42,10 +42,13 @@ typedef enum EGroupAction {
typedef struct SLogicNode {
ENodeType type;
bool dynamicOp;
bool stmtRoot;
SNodeList* pTargets; // SColumnNode
SNode* pConditions;
SNodeList* pChildren;
struct SLogicNode* pParent;
SNodeList* pHint;
int32_t optimizedFlag;
uint8_t precision;
SNode* pLimit;
@ -107,15 +110,21 @@ typedef struct SScanLogicNode {
bool sortPrimaryKey;
bool igLastNull;
bool groupOrderScan;
bool onlyMetaCtbIdx; // for tag scan with no tbname
} SScanLogicNode;
typedef struct SJoinLogicNode {
SLogicNode node;
EJoinType joinType;
SNode* pMergeCondition;
SNode* pOnConditions;
bool isSingleTableJoin;
SNode* pColEqualOnConditions;
SLogicNode node;
EJoinType joinType;
EJoinAlgorithm joinAlgo;
SNode* pPrimKeyEqCond;
SNode* pColEqCond;
SNode* pTagEqCond;
SNode* pTagOnCond;
SNode* pOtherOnCond;
bool isSingleTableJoin;
bool hasSubQuery;
bool isLowLevelJoin;
} SJoinLogicNode;
typedef struct SAggLogicNode {
@ -154,6 +163,28 @@ typedef struct SInterpFuncLogicNode {
SNode* pTimeSeries; // SColumnNode
} SInterpFuncLogicNode;
typedef struct SGroupCacheLogicNode {
SLogicNode node;
bool grpColsMayBeNull;
bool grpByUid;
bool globalGrp;
bool batchFetch;
SNodeList* pGroupCols;
} SGroupCacheLogicNode;
typedef struct SDynQueryCtrlStbJoin {
bool batchFetch;
SNodeList* pVgList;
SNodeList* pUidList;
bool srcScan[2];
} SDynQueryCtrlStbJoin;
typedef struct SDynQueryCtrlLogicNode {
SLogicNode node;
EDynQueryType qType;
SDynQueryCtrlStbJoin stbJoin;
} SDynQueryCtrlLogicNode;
typedef enum EModifyTableType { MODIFY_TABLE_TYPE_INSERT = 1, MODIFY_TABLE_TYPE_DELETE } EModifyTableType;
typedef struct SVnodeModifyLogicNode {
@ -312,6 +343,7 @@ typedef struct SDataBlockDescNode {
typedef struct SPhysiNode {
ENodeType type;
bool dynamicOp;
EOrder inputTsOrder;
EOrder outputTsOrder;
SDataBlockDescNode* pOutputDataBlockDesc;
@ -334,7 +366,11 @@ typedef struct SScanPhysiNode {
bool groupOrderScan;
} SScanPhysiNode;
typedef SScanPhysiNode STagScanPhysiNode;
typedef struct STagScanPhysiNode {
SScanPhysiNode scan;
bool onlyMetaCtbIdx; //no tbname, tag index not used.
} STagScanPhysiNode;
typedef SScanPhysiNode SBlockDistScanPhysiNode;
typedef struct SLastRowScanPhysiNode {
@ -409,12 +445,50 @@ typedef struct SInterpFuncPhysiNode {
typedef struct SSortMergeJoinPhysiNode {
SPhysiNode node;
EJoinType joinType;
SNode* pMergeCondition;
SNode* pOnConditions;
SNode* pPrimKeyCond;
SNode* pColEqCond;
SNode* pOtherOnCond;
SNodeList* pTargets;
SNode* pColEqualOnConditions;
} SSortMergeJoinPhysiNode;
typedef struct SHashJoinPhysiNode {
SPhysiNode node;
EJoinType joinType;
SNodeList* pOnLeft;
SNodeList* pOnRight;
SNode* pFilterConditions;
SNodeList* pTargets;
SQueryStat inputStat[2];
SNode* pPrimKeyCond;
SNode* pColEqCond;
SNode* pTagEqCond;
} SHashJoinPhysiNode;
typedef struct SGroupCachePhysiNode {
SPhysiNode node;
bool grpColsMayBeNull;
bool grpByUid;
bool globalGrp;
bool batchFetch;
SNodeList* pGroupCols;
} SGroupCachePhysiNode;
typedef struct SStbJoinDynCtrlBasic {
bool batchFetch;
int32_t vgSlot[2];
int32_t uidSlot[2];
bool srcScan[2];
} SStbJoinDynCtrlBasic;
typedef struct SDynQueryCtrlPhysiNode {
SPhysiNode node;
EDynQueryType qType;
union {
SStbJoinDynCtrlBasic stbJoin;
};
} SDynQueryCtrlPhysiNode;
typedef struct SAggPhysiNode {
SPhysiNode node;
SNodeList* pExprs; // these are expression list of group_by_clause and parameter expression of aggregate function
@ -603,6 +677,8 @@ typedef struct SSubplan {
SNode* pTagCond;
SNode* pTagIndexCond;
bool showRewrite;
int32_t rowsThreshold;
bool dynamicRowThreshold;
} SSubplan;
typedef enum EExplainMode { EXPLAIN_MODE_DISABLE = 1, EXPLAIN_MODE_STATIC, EXPLAIN_MODE_ANALYZE } EExplainMode;

View File

@ -116,6 +116,17 @@ typedef struct SLeftValueNode {
ENodeType type;
} SLeftValueNode;
typedef enum EHintOption {
HINT_NO_BATCH_SCAN = 1,
HINT_BATCH_SCAN,
} EHintOption;
typedef struct SHintNode {
ENodeType type;
EHintOption option;
void* value;
} SHintNode;
typedef struct SOperatorNode {
SExprNode node; // QUERY_NODE_OPERATOR
EOperatorType opType;
@ -169,11 +180,27 @@ typedef struct STempTableNode {
SNode* pSubquery;
} STempTableNode;
typedef enum EJoinType { JOIN_TYPE_INNER = 1 } EJoinType;
typedef enum EJoinType {
JOIN_TYPE_INNER = 1,
JOIN_TYPE_LEFT,
JOIN_TYPE_RIGHT,
} EJoinType;
typedef enum EJoinAlgorithm {
JOIN_ALGO_UNKNOWN = 0,
JOIN_ALGO_MERGE,
JOIN_ALGO_HASH,
} EJoinAlgorithm;
typedef enum EDynQueryType {
DYN_QTYPE_STB_HASH = 1,
} EDynQueryType;
typedef struct SJoinTableNode {
STableNode table; // QUERY_NODE_JOIN_TABLE
EJoinType joinType;
bool hasSubQuery;
bool isLowLevelJoin;
SNode* pLeft;
SNode* pRight;
SNode* pOnCond;
@ -289,6 +316,7 @@ typedef struct SSelectStmt {
SLimitNode* pLimit;
SLimitNode* pSlimit;
STimeWindow timeRange;
SNodeList* pHint;
char stmtName[TSDB_TABLE_NAME_LEN];
uint8_t precision;
int32_t selectFuncNum;
@ -470,7 +498,7 @@ int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char*
int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, ECollectColType type, SNodeList** pCols);
typedef bool (*FFuncClassifier)(int32_t funcId);
int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifier classifier, SNodeList** pFuncs);
int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, char* tableAlias, FFuncClassifier classifier, SNodeList** pFuncs);
int32_t nodesCollectSpecialNodes(SSelectStmt* pSelect, ESqlClause clause, ENodeType type, SNodeList** pNodes);

View File

@ -90,11 +90,6 @@ typedef struct SExecResult {
void* res;
} SExecResult;
typedef struct STbVerInfo {
char tbFName[TSDB_TABLE_FNAME_LEN];
int32_t sversion;
int32_t tversion;
} STbVerInfo;
#pragma pack(push, 1)
typedef struct SCTableMeta {
@ -212,6 +207,11 @@ typedef struct SQueryNodeStat {
int32_t tableNum; // vg table number, unit is TSDB_TABLE_NUM_UNIT
} SQueryNodeStat;
typedef struct SQueryStat {
int64_t inputRowNum;
int32_t inputRowSize;
} SQueryStat;
int32_t initTaskQueue();
int32_t cleanupTaskQueue();

View File

@ -95,6 +95,8 @@ int32_t qStartTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *p
int32_t qEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qTbnameFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qTbUidFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
int32_t qVgIdFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
/* Aggregation functions */
int32_t countScalarFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);

View File

@ -122,6 +122,7 @@ typedef struct {
int8_t type;
int32_t srcVgId;
int32_t srcTaskId;
int32_t childId;
int64_t sourceVer;
int64_t reqId;
@ -251,6 +252,7 @@ typedef struct SStreamChildEpInfo {
int32_t nodeId;
int32_t childId;
int32_t taskId;
int8_t dataAllowed;
SEpSet epSet;
} SStreamChildEpInfo;
@ -272,6 +274,7 @@ typedef struct SStreamStatus {
int8_t schedStatus;
int8_t keepTaskStatus;
bool transferState;
bool appendTranstateBlock; // has appended the transfer-state data block already, todo: remove it
int8_t timerActive; // timer is active
int8_t pauseAllowed; // allowed task status to be set to be paused
} SStreamStatus;
@ -399,8 +402,9 @@ typedef struct {
typedef struct {
int64_t streamId;
int32_t type;
int32_t taskId;
int32_t dataSrcVgId;
int32_t srcVgId;
int32_t upstreamTaskId;
int32_t upstreamChildId;
int32_t upstreamNodeId;
@ -570,8 +574,6 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
void tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq);
int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTask* pTask, int32_t vgId, int32_t numOfBlocks,
int64_t dstTaskId);
void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
@ -579,6 +581,8 @@ int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
int32_t streamProcessRunReq(SStreamTask* pTask);
int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec);
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code);
void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg);
@ -589,7 +593,6 @@ int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock
bool streamTaskShouldStop(const SStreamStatus* pStatus);
bool streamTaskShouldPause(const SStreamStatus* pStatus);
bool streamTaskIsIdle(const SStreamTask* pTask);
int32_t streamTaskEndScanWAL(SStreamTask* pTask);
SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId);
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize);
@ -626,7 +629,7 @@ int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* p
int32_t streamSourceScanHistoryData(SStreamTask* pTask);
int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);
int32_t streamDispatchTransferStateMsg(SStreamTask* pTask);
int32_t appendTranstateIntoInputQ(SStreamTask* pTask);
// agg level
int32_t streamTaskScanHistoryPrepare(SStreamTask* pTask);

View File

@ -101,6 +101,7 @@ typedef struct SSyncCfg {
int32_t myIndex;
SNodeInfo nodeInfo[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
SyncIndex lastIndex;
int32_t changeVersion;
} SSyncCfg;
typedef struct SFsmCbMeta {
@ -239,19 +240,22 @@ typedef struct SSyncState {
ESyncState state;
bool restored;
bool canRead;
int32_t progress;
SyncTerm term;
int64_t roleTimeMs;
int64_t startTimeMs;
} SSyncState;
int32_t syncInit();
void syncCleanUp();
int64_t syncOpen(SSyncInfo* pSyncInfo);
int32_t syncStart(int64_t rid);
void syncStop(int64_t rid);
void syncPreStop(int64_t rid);
void syncPostStop(int64_t rid);
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak, int64_t* seq);
int32_t syncIsCatchUp(int64_t rid);
int32_t syncInit();
void syncCleanUp();
int64_t syncOpen(SSyncInfo* pSyncInfo, int32_t vnodeVersion);
int32_t syncStart(int64_t rid);
void syncStop(int64_t rid);
void syncPreStop(int64_t rid);
void syncPostStop(int64_t rid);
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak, int64_t* seq);
int32_t syncCheckMember(int64_t rid);
int32_t syncIsCatchUp(int64_t rid);
ESyncRole syncGetRole(int64_t rid);
int32_t syncProcessMsg(int64_t rid, SRpcMsg* pMsg);
int32_t syncReconfig(int64_t rid, SSyncCfg* pCfg);
@ -269,6 +273,8 @@ SSyncState syncGetState(int64_t rid);
void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet);
const char* syncStr(ESyncState state);
int32_t syncNodeGetConfig(int64_t rid, SSyncCfg *cfg);
#ifdef __cplusplus
}
#endif

View File

@ -153,7 +153,6 @@ struct SWalReader {
int64_t capacity;
TdThreadMutex mutex;
SWalFilterCond cond;
// TODO remove it
SWalCkHead *pHead;
};
@ -207,10 +206,9 @@ void walReaderValidVersionRange(SWalReader *pReader, int64_t *sver, int64
void walReaderVerifyOffset(SWalReader *pWalReader, STqOffsetVal* pOffset);
// only for tq usage
void walSetReaderCapacity(SWalReader *pRead, int32_t capacity);
int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead);
int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead);
int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead);
int32_t walFetchHead(SWalReader *pRead, int64_t ver);
int32_t walFetchBody(SWalReader *pRead);
int32_t walSkipFetchBody(SWalReader *pRead);
void walRefFirstVer(SWal *, SWalRef *);
void walRefLastVer(SWal *, SWalRef *);
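With the reworked signatures the SWalCkHead buffer lives inside SWalReader, so a tq-style read loop works off pReader->pHead instead of a caller-owned head. A hedged sketch under that assumption; wantBody() is a hypothetical caller-side filter, and 0-means-success is assumed:

// Sketch only: fetch one WAL record at version 'ver' with the new API.
if (walFetchHead(pReader, ver) == 0) {          // header now lands in pReader->pHead
  if (wantBody(pReader->pHead)) {               // hypothetical filter on msg type
    walFetchBody(pReader);                      // assumed to decode the body in place
    // consume the record here
  } else {
    walSkipFetchBody(pReader);                  // advance without decoding the body
  }
}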

View File

@ -76,7 +76,7 @@ int32_t taosUnLockFile(TdFilePtr pFile);
int32_t taosUmaskFile(int32_t maskVal);
int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime);
int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime);
int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno);
int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime);
bool taosCheckExistFile(const char *pathname);
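taosStatFile() gains an access-time out-parameter alongside size and mtime. A hedged usage sketch; the path is illustrative and the exact timestamp semantics of mtime/atime are assumed, not stated by this header:

// Sketch only: stat a file with the extended signature (0 assumed to mean success).
int64_t size  = 0;
int32_t mtime = 0;
int32_t atime = 0;
if (taosStatFile("/var/log/taos/taosdlog.0", &size, &mtime, &atime) == 0) {
  // size in bytes; mtime/atime as UNIX timestamps (assumed)
}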

View File

@ -32,6 +32,8 @@ void taosSeedRand(uint32_t seed);
uint32_t taosRand(void);
uint32_t taosRandR(uint32_t* pSeed);
void taosRandStr(char* str, int32_t size);
void taosRandStr2(char* str, int32_t size);
uint32_t taosSafeRand(void);
#ifdef __cplusplus

View File

@ -191,6 +191,7 @@ int32_t* taosGetErrno();
// #define TSDB_CODE_MND_FAILED_TO_CREATE_DIR TAOS_DEF_ERROR_CODE(0, 0x0313) // 2.x
// #define TSDB_CODE_MND_FAILED_TO_INIT_STEP TAOS_DEF_ERROR_CODE(0, 0x0314) // 2.x
#define TSDB_CODE_MND_USER_DISABLED TAOS_DEF_ERROR_CODE(0, 0x0315)
#define TSDB_CODE_MND_INVALID_PLATFORM TAOS_DEF_ERROR_CODE(0, 0x0316)
// mnode-sdb
#define TSDB_CODE_SDB_OBJ_ALREADY_THERE TAOS_DEF_ERROR_CODE(0, 0x0320) // internal
@ -517,6 +518,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_QRY_JOB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x072F)
#define TSDB_CODE_QRY_QWORKER_QUIT TAOS_DEF_ERROR_CODE(0, 0x0730)
#define TSDB_CODE_QRY_GEO_NOT_SUPPORT_ERROR TAOS_DEF_ERROR_CODE(0, 0x0731)
#define TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x0732)
// grant
#define TSDB_CODE_GRANT_EXPIRED TAOS_DEF_ERROR_CODE(0, 0x0800)
@ -713,6 +715,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PLAN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x2700)
#define TSDB_CODE_PLAN_EXPECTED_TS_EQUAL TAOS_DEF_ERROR_CODE(0, 0x2701)
#define TSDB_CODE_PLAN_NOT_SUPPORT_CROSS_JOIN TAOS_DEF_ERROR_CODE(0, 0x2702)
#define TSDB_CODE_PLAN_NOT_SUPPORT_JOIN_COND TAOS_DEF_ERROR_CODE(0, 0x2703)
//function
#define TSDB_CODE_FUNC_FUNTION_ERROR TAOS_DEF_ERROR_CODE(0, 0x2800)

View File

@ -230,6 +230,7 @@ typedef enum ELogicConditionType {
#define TSDB_APP_NAME_LEN TSDB_UNI_LEN
#define TSDB_TB_COMMENT_LEN 1025
#define TSDB_COL_COMMENT_LEN 1025
#define TSDB_QUERY_ID_LEN 26
#define TSDB_TRANS_OPER_LEN 16

View File

@ -16,7 +16,7 @@ target_include_directories(
target_link_libraries(
taos
INTERFACE api
PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom
PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom geometry
)
if(TD_DARWIN_ARM64)
@ -57,7 +57,7 @@ target_include_directories(
target_link_libraries(
taos_static
INTERFACE api
PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom
PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom geometry
)
if(${BUILD_TEST})

View File

@ -33,6 +33,7 @@ extern "C" {
#include "ttime.h"
#include "ttypes.h"
#include "cJSON.h"
#include "geosWrapper.h"
#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
# define expect(expr,value) (__builtin_expect ((expr),(value)) )
@ -192,7 +193,7 @@ typedef struct {
//
SArray *preLineTagKV;
SArray *maxTagKVs;
SArray *masColKVs;
SArray *maxColKVs;
SSmlLineInfo preLine;
STableMeta *currSTableMeta;

View File

@ -1073,6 +1073,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
return 0;
end:
taosHashCancelIterate(info->superTables, tmp);
taosHashCleanup(hashTmp);
taosMemoryFreeClear(pTableMeta);
catalogRefreshTableMeta(info->pCatalog, &conn, &pName, 1);
@ -1191,6 +1192,7 @@ void freeSSmlKv(void *data) {
SSmlKv *kv = (SSmlKv *)data;
if (kv->keyEscaped) taosMemoryFree((void *)(kv->key));
if (kv->valueEscaped) taosMemoryFree((void *)(kv->value));
if (kv->type == TSDB_DATA_TYPE_GEOMETRY) geosFreeBuffer((void *)(kv->value));
}
void smlDestroyInfo(SSmlHandle *info) {
@ -1433,6 +1435,7 @@ static int32_t smlInsertData(SSmlHandle *info) {
code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE);
if(code != TSDB_CODE_SUCCESS){
taosMemoryFree(measure);
taosHashCancelIterate(info->childTables, oneTable);
return code;
}
@ -1441,6 +1444,7 @@ static int32_t smlInsertData(SSmlHandle *info) {
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " catalogGetTableHashVgroup failed. table name: %s", info->id, tableData->childTableName);
taosMemoryFree(measure);
taosHashCancelIterate(info->childTables, oneTable);
return code;
}
taosHashPut(info->pVgHash, (const char *)&vg.vgId, sizeof(vg.vgId), (char *)&vg, sizeof(vg));
@ -1450,6 +1454,7 @@ static int32_t smlInsertData(SSmlHandle *info) {
if (unlikely(NULL == pMeta || NULL == (*pMeta)->tableMeta)) {
uError("SML:0x%" PRIx64 " NULL == pMeta. table name: %s", info->id, tableData->childTableName);
taosMemoryFree(measure);
taosHashCancelIterate(info->childTables, oneTable);
return TSDB_CODE_SML_INTERNAL_ERROR;
}
@ -1465,6 +1470,7 @@ static int32_t smlInsertData(SSmlHandle *info) {
taosMemoryFree(measure);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlBindData failed", info->id);
taosHashCancelIterate(info->childTables, oneTable);
return code;
}
oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, oneTable);

View File

@ -102,6 +102,30 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
return TSDB_CODE_TSC_INVALID_VALUE;
}
if (pVal->value[0] == 'g' || pVal->value[0] == 'G') { // geometry
if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= sizeof("POINT")+3) {
int32_t code = initCtxGeomFromText();
if (code != TSDB_CODE_SUCCESS) {
return code;
}
char* tmp = taosMemoryCalloc(pVal->length, 1);
memcpy(tmp, pVal->value + 2, pVal->length - 3);
code = doGeomFromText(tmp, (unsigned char **)&pVal->value, &pVal->length);
taosMemoryFree(tmp);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
pVal->type = TSDB_DATA_TYPE_GEOMETRY;
if (pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
geosFreeBuffer((void*)(pVal->value));
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
}
return TSDB_CODE_SUCCESS;
}
return TSDB_CODE_TSC_INVALID_VALUE;
}
if (pVal->value[0] == 't' || pVal->value[0] == 'T') {
if (pVal->length == 1 ||
(pVal->length == 4 && (pVal->value[1] == 'r' || pVal->value[1] == 'R') &&
@ -390,14 +414,14 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
SSmlKv kv = {.key = tag->name, .keyLen = strlen(tag->name), .type = tag->type};
if (tag->type == TSDB_DATA_TYPE_NCHAR) {
kv.length = (tag->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
} else if (tag->type == TSDB_DATA_TYPE_BINARY) {
} else if (tag->type == TSDB_DATA_TYPE_BINARY || tag->type == TSDB_DATA_TYPE_GEOMETRY) {
kv.length = tag->bytes - VARSTR_HEADER_SIZE;
}
taosArrayPush((*tmp)->cols, &kv);
}
}
info->currSTableMeta = (*tmp)->tableMeta;
info->masColKVs = (*tmp)->cols;
info->maxColKVs = (*tmp)->cols;
}
}
@ -512,13 +536,13 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
freeSSmlKv(&kv);
return TSDB_CODE_SUCCESS;
}
if (cnt >= taosArrayGetSize(info->masColKVs)) {
if (cnt >= taosArrayGetSize(info->maxColKVs)) {
info->dataFormat = false;
info->reRun = true;
freeSSmlKv(&kv);
return TSDB_CODE_SUCCESS;
}
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(info->masColKVs, cnt);
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(info->maxColKVs, cnt);
if (kv.type != maxKV->type) {
info->dataFormat = false;
info->reRun = true;
@ -663,14 +687,15 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
if (info->dataFormat) {
uDebug("SML:0x%" PRIx64 " smlParseInfluxString format true, ts:%" PRId64, info->id, ts);
ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 0);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
ret = smlBuildRow(info->currTableDataCtx);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
if (ret == TSDB_CODE_SUCCESS) {
ret = smlBuildRow(info->currTableDataCtx);
}
clearColValArray(info->currTableDataCtx->pValues);
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
return ret;
}
} else {
uDebug("SML:0x%" PRIx64 " smlParseInfluxString format false, ts:%" PRId64, info->id, ts);
taosArraySet(elements->colArray, 0, &kv);
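For reference, the geometry framing that smlParseValue() checks earlier in this file (a leading g/G, then the WKT wrapped in double quotes) corresponds to an influx-line-protocol field like the one below; the measurement, tag and field names, and the integer/timestamp parts, are made up for illustration:

// Hypothetical line-protocol record carrying a geometry column; only the
// loc=G"..." framing is taken from the parser above.
const char *line =
    "geo,device=d1 loc=G\"POINT(121.47 31.23)\",v=12i 1660000000000000000";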

View File

@ -1863,10 +1863,10 @@ static int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* p
return 0;
}
static void updateVgInfo(SMqClientVg* pVg, STqOffsetVal* reqOffset, STqOffsetVal* rspOffset, int64_t sver, int64_t ever, int64_t consumerId){
static void updateVgInfo(SMqClientVg* pVg, STqOffsetVal* reqOffset, STqOffsetVal* rspOffset, int64_t sver, int64_t ever, int64_t consumerId, bool hasData){
if (!pVg->seekUpdated) {
tscDebug("consumer:0x%" PRIx64" local offset is update, since seekupdate not set", consumerId);
pVg->offsetInfo.beginOffset = *reqOffset;
if(hasData) pVg->offsetInfo.beginOffset = *reqOffset;
pVg->offsetInfo.endOffset = *rspOffset;
} else {
tscDebug("consumer:0x%" PRIx64" local offset is NOT update, since seekupdate is set", consumerId);
@ -1929,7 +1929,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
pVg->epSet = *pollRspWrapper->pEpset;
}
updateVgInfo(pVg, &pDataRsp->reqOffset, &pDataRsp->rspOffset, pDataRsp->head.walsver, pDataRsp->head.walever, tmq->consumerId);
updateVgInfo(pVg, &pDataRsp->reqOffset, &pDataRsp->rspOffset, pDataRsp->head.walsver, pDataRsp->head.walever, tmq->consumerId, pDataRsp->blockNum != 0);
char buf[TSDB_OFFSET_LEN] = {0};
tFormatOffset(buf, TSDB_OFFSET_LEN, &pDataRsp->rspOffset);
@ -1979,7 +1979,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
return NULL;
}
updateVgInfo(pVg, &pollRspWrapper->metaRsp.rspOffset, &pollRspWrapper->metaRsp.rspOffset, pollRspWrapper->metaRsp.head.walsver, pollRspWrapper->metaRsp.head.walever, tmq->consumerId);
updateVgInfo(pVg, &pollRspWrapper->metaRsp.rspOffset, &pollRspWrapper->metaRsp.rspOffset, pollRspWrapper->metaRsp.head.walsver, pollRspWrapper->metaRsp.head.walever, tmq->consumerId, true);
// build rsp
SMqMetaRspObj* pRsp = tmqBuildMetaRspFromWrapper(pollRspWrapper);
taosFreeQitem(pollRspWrapper);
@ -2007,7 +2007,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
return NULL;
}
updateVgInfo(pVg, &pollRspWrapper->taosxRsp.reqOffset, &pollRspWrapper->taosxRsp.rspOffset, pollRspWrapper->taosxRsp.head.walsver, pollRspWrapper->taosxRsp.head.walever, tmq->consumerId);
updateVgInfo(pVg, &pollRspWrapper->taosxRsp.reqOffset, &pollRspWrapper->taosxRsp.rspOffset, pollRspWrapper->taosxRsp.head.walsver, pollRspWrapper->taosxRsp.head.walever, tmq->consumerId, pollRspWrapper->taosxRsp.blockNum != 0);
if (pollRspWrapper->taosxRsp.blockNum == 0) {
tscDebug("consumer:0x%" PRIx64 " taosx empty block received, vgId:%d, vg total:%" PRId64 ", reqId:0x%" PRIx64,

View File

@ -20,7 +20,7 @@ TARGET_LINK_LIBRARIES(
ADD_EXECUTABLE(smlTest smlTest.cpp)
TARGET_LINK_LIBRARIES(
smlTest
PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom
PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom geometry
)
TARGET_INCLUDE_DIRECTORIES(

View File

@ -1442,178 +1442,4 @@ TEST(clientCase, sub_tb_mt_test) {
}
}
TEST(clientCase, ts_3756) {
// taos_options(TSDB_OPTION_CONFIGDIR, "~/first/cfg");
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
tmq_conf_t* conf = tmq_conf_new();
tmq_conf_set(conf, "enable.auto.commit", "false");
tmq_conf_set(conf, "auto.commit.interval.ms", "2000");
tmq_conf_set(conf, "group.id", "group_id_2");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "latest");
tmq_conf_set(conf, "msg.with.table.name", "false");
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
tmq_conf_destroy(conf);
// create the list of topics to subscribe to
tmq_list_t* topicList = tmq_list_new();
tmq_list_append(topicList, "tp");
// start the subscription
tmq_subscribe(tmq, topicList);
tmq_list_destroy(topicList);
TAOS_FIELD* fields = NULL;
int32_t numOfFields = 0;
int32_t precision = 0;
int32_t totalRows = 0;
int32_t msgCnt = 0;
int32_t timeout = 200;
int32_t count = 0;
tmq_topic_assignment* pAssign = NULL;
int32_t numOfAssign = 0;
int32_t code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign);
if (code != 0) {
printf("error occurs:%s\n", tmq_err2str(code));
tmq_free_assignment(pAssign);
tmq_consumer_close(tmq);
taos_close(pConn);
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
return;
}
for(int i = 0; i < numOfAssign; i++){
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
}
// tmq_offset_seek(tmq, "tp", pAssign[0].vgId, 4);
tmq_free_assignment(pAssign);
code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign);
if (code != 0) {
printf("error occurs:%s\n", tmq_err2str(code));
tmq_free_assignment(pAssign);
tmq_consumer_close(tmq);
taos_close(pConn);
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
return;
}
for(int i = 0; i < numOfAssign; i++){
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
}
tmq_free_assignment(pAssign);
code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign);
if (code != 0) {
printf("error occurs:%s\n", tmq_err2str(code));
tmq_free_assignment(pAssign);
tmq_consumer_close(tmq);
taos_close(pConn);
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
return;
}
for(int i = 0; i < numOfAssign; i++){
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
}
while (1) {
printf("start to poll\n");
TAOS_RES* pRes = tmq_consumer_poll(tmq, timeout);
if (pRes) {
char buf[128];
const char* topicName = tmq_get_topic_name(pRes);
// const char* dbName = tmq_get_db_name(pRes);
// int32_t vgroupId = tmq_get_vgroup_id(pRes);
//
// printf("topic: %s\n", topicName);
// printf("db: %s\n", dbName);
// printf("vgroup id: %d\n", vgroupId);
printSubResults(pRes, &totalRows);
tmq_topic_assignment* pAssignTmp = NULL;
int32_t numOfAssignTmp = 0;
code = tmq_get_topic_assignment(tmq, "tp", &pAssignTmp, &numOfAssignTmp);
if (code != 0) {
printf("error occurs:%s\n", tmq_err2str(code));
tmq_free_assignment(pAssign);
tmq_consumer_close(tmq);
taos_close(pConn);
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
return;
}
for(int i = 0; i < numOfAssign; i++){
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssignTmp[i].vgId, pAssignTmp[i].currentOffset, pAssignTmp[i].begin, pAssignTmp[i].end);
}
if(numOfAssign != 0){
int i = 0;
for(; i < numOfAssign; i++){
if(pAssign[i].currentOffset != pAssignTmp[i].currentOffset){
break;
}
}
if(i == numOfAssign){
printf("all position is same\n");
break;
}
tmq_free_assignment(pAssign);
}
numOfAssign = numOfAssignTmp;
pAssign = pAssignTmp;
} else {
// tmq_offset_seek(tmq, "tp", pAssign[0].vgId, pAssign[0].currentOffset);
// tmq_offset_seek(tmq, "tp", pAssign[1].vgId, pAssign[1].currentOffset);
// tmq_commit_sync(tmq, pRes);
continue;
}
// tmq_commit_sync(tmq, pRes);
if (pRes != NULL) {
taos_free_result(pRes);
// if ((++count) > 1) {
// break;
// }
} else {
// break;
}
// tmq_offset_seek(tmq, "tp", pAssign[0].vgId, pAssign[0].begin);
}
tmq_free_assignment(pAssign);
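// Take one final assignment snapshot after the poll loop to log the closing offsets before shutting the consumer down.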
code = tmq_get_topic_assignment(tmq, "tp", &pAssign, &numOfAssign);
if (code != 0) {
printf("error occurs:%s\n", tmq_err2str(code));
tmq_free_assignment(pAssign);
tmq_consumer_close(tmq);
taos_close(pConn);
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
return;
}
for(int i = 0; i < numOfAssign; i++){
printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end);
}
tmq_consumer_close(tmq);
taos_close(pConn);
fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
}
#pragma GCC diagnostic pop

View File

@ -16,6 +16,14 @@ ENDIF ()
IF (TD_STORAGE)
ADD_DEFINITIONS(-D_STORAGE)
TARGET_LINK_LIBRARIES(common PRIVATE storage)
IF(${TD_LINUX})
IF(${BUILD_WITH_COS})
add_definitions(-DUSE_COS)
ENDIF(${BUILD_WITH_COS})
ENDIF(${TD_LINUX})
ENDIF ()
target_include_directories(

View File

@ -284,7 +284,6 @@ static const SSysDbTableSchema topicSchema[] = {
{.name = "type", .bytes = 8 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
};
static const SSysDbTableSchema subscriptionSchema[] = {
{.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
@ -295,12 +294,13 @@ static const SSysDbTableSchema subscriptionSchema[] = {
};
static const SSysDbTableSchema vnodesSchema[] = {
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
{.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
{.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "dnode_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
{.name = "dnode_ep", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
{.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "role_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
{.name = "start_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
{.name = "restored", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = true},
};
static const SSysDbTableSchema userUserPrivilegesSchema[] = {
@ -314,7 +314,7 @@ static const SSysDbTableSchema userUserPrivilegesSchema[] = {
static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true},
{TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true},
{TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true},
// {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true},
{TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema), true},
{TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema), true},
{TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema), true},

View File

@ -26,7 +26,7 @@ int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRo
if (pColumnInfoData->reassigned) {
int32_t totalSize = 0;
for (int32_t row = 0; row < numOfRows; ++row) {
char* pColData = pColumnInfoData->pData + pColumnInfoData->varmeta.offset[row];
char* pColData = pColumnInfoData->pData + pColumnInfoData->varmeta.offset[row];
int32_t colSize = 0;
if (pColumnInfoData->info.type == TSDB_DATA_TYPE_JSON) {
colSize = getJsonValueLen(pColData);
@ -142,18 +142,10 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const
return 0;
}
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData) {
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx,
const char* pData) {
int32_t type = pColumnInfoData->info.type;
if (IS_VAR_DATA_TYPE(type)) {
int32_t dataLen = 0;
if (type == TSDB_DATA_TYPE_JSON) {
dataLen = getJsonValueLen(pData);
} else {
dataLen = varDataTLen(pData);
}
SVarColAttr* pAttr = &pColumnInfoData->varmeta;
pColumnInfoData->varmeta.offset[dstRowIdx] = pColumnInfoData->varmeta.offset[srcRowIdx];
pColumnInfoData->reassigned = true;
} else {
@ -164,7 +156,6 @@ int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx,
return 0;
}
static int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) {
if (!IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
return TSDB_CODE_SUCCESS;
@ -188,16 +179,17 @@ static int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize)
}
static int32_t doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t currentRow, const char* pData,
int32_t itemLen, int32_t numOfRows, bool trimValue) {
int32_t itemLen, int32_t numOfRows, bool trimValue) {
if (pColumnInfoData->info.bytes < itemLen) {
uWarn("column/tag actual data len %d is bigger than schema len %d, trim it:%d", itemLen, pColumnInfoData->info.bytes, trimValue);
uWarn("column/tag actual data len %d is bigger than schema len %d, trim it:%d", itemLen,
pColumnInfoData->info.bytes, trimValue);
if (trimValue) {
itemLen = pColumnInfoData->info.bytes;
} else {
return TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER;
}
}
size_t start = 1;
// the first item
@ -230,8 +222,8 @@ static int32_t doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t cur
return TSDB_CODE_SUCCESS;
}
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
uint32_t numOfRows, bool trimValue) {
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows,
bool trimValue) {
int32_t len = pColumnInfoData->info.bytes;
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
len = varDataTLen(pData);
@ -246,6 +238,82 @@ int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow,
return doCopyNItems(pColumnInfoData, currentRow, pData, len, numOfRows, trimValue);
}
void colDataSetNItemsNull(SColumnInfoData* pColumnInfoData, uint32_t currentRow, uint32_t numOfRows) {
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
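// For variable-length columns a negative offset marks a NULL row, so filling the offset slots with -1 bytes nulls all rows in a single memset.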
memset(&pColumnInfoData->varmeta.offset[currentRow], -1, sizeof(int32_t) * numOfRows);
} else {
if (numOfRows < sizeof(char) * 2) {
for (int32_t i = 0; i < numOfRows; ++i) {
colDataSetNull_f(pColumnInfoData->nullbitmap, currentRow + i);
}
} else {
int32_t i = 0;
for (; i < numOfRows; ++i) {
if (BitPos(currentRow + i)) {
colDataSetNull_f(pColumnInfoData->nullbitmap, currentRow + i);
} else {
break;
}
}
memset(&BMCharPos(pColumnInfoData->nullbitmap, currentRow + i), 0xFF, (numOfRows - i) / sizeof(char));
i += (numOfRows - i) / sizeof(char) * sizeof(char);
for (; i < numOfRows; ++i) {
colDataSetNull_f(pColumnInfoData->nullbitmap, currentRow + i);
}
}
}
}
int32_t colDataCopyAndReassign(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows) {
int32_t code = colDataSetVal(pColumnInfoData, currentRow, pData, false);
if (code) {
return code;
}
if (numOfRows > 1) {
int32_t* pOffset = pColumnInfoData->varmeta.offset;
memset(&pOffset[currentRow + 1], pOffset[currentRow], sizeof(pOffset[0]) * (numOfRows - 1));
pColumnInfoData->reassigned = true;
}
return TSDB_CODE_SUCCESS;
}
int32_t colDataCopyNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
uint32_t numOfRows, bool isNull) {
int32_t len = pColumnInfoData->info.bytes;
if (isNull) {
colDataSetNItemsNull(pColumnInfoData, currentRow, numOfRows);
pColumnInfoData->hasNull = true;
return 0;
}
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
return colDataCopyAndReassign(pColumnInfoData, currentRow, pData, numOfRows);
} else {
int32_t colBytes = pColumnInfoData->info.bytes;
int32_t colOffset = currentRow * colBytes;
uint32_t num = 1;
void* pStart = pColumnInfoData->pData + colOffset;
memcpy(pStart, pData, colBytes);
colOffset += num * colBytes;
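// Grow the filled region exponentially: copy the already-written prefix onto the tail, doubling each pass, so numOfRows values are filled with O(log n) memcpy calls.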
while (num < numOfRows) {
int32_t maxNum = num << 1;
int32_t tnum = maxNum > numOfRows ? (numOfRows - num) : num;
memcpy(pColumnInfoData->pData + colOffset, pStart, tnum * colBytes);
colOffset += tnum * colBytes;
num += tnum;
}
}
return TSDB_CODE_SUCCESS;
}
static void doBitmapMerge(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, const SColumnInfoData* pSource,
int32_t numOfRow2) {
if (numOfRow2 <= 0) return;
@ -262,7 +330,7 @@ static void doBitmapMerge(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, c
uint8_t* p = (uint8_t*)pSource->nullbitmap;
pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] &= (0B11111111 << shiftBits); // clear remind bits
pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] |= (p[0] >> remindBits); // copy remind bits
pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] |= (p[0] >> remindBits); // copy remind bits
if (BitmapLen(numOfRow1) == BitmapLen(total)) {
return;
@ -350,7 +418,7 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int
pColumnInfoData->pData = tmp;
if (BitmapLen(numOfRow1) < BitmapLen(finalNumOfRows)) {
char* btmp = taosMemoryRealloc(pColumnInfoData->nullbitmap, BitmapLen(finalNumOfRows));
char* btmp = taosMemoryRealloc(pColumnInfoData->nullbitmap, BitmapLen(finalNumOfRows));
if (btmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@ -622,7 +690,7 @@ int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock) {
if (pCol->reassigned && IS_VAR_DATA_TYPE(pCol->info.type)) {
for (int32_t row = 0; row < numOfRows; ++row) {
char* pColData = pCol->pData + pCol->varmeta.offset[row];
char* pColData = pCol->pData + pCol->varmeta.offset[row];
int32_t colSize = 0;
if (pCol->info.type == TSDB_DATA_TYPE_JSON) {
colSize = getJsonValueLen(pColData);
@ -698,8 +766,7 @@ int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) {
return TSDB_CODE_SUCCESS;
}
static bool colDataIsNNull(const SColumnInfoData* pColumnInfoData, int32_t startIndex,
uint32_t nRows) {
static bool colDataIsNNull(const SColumnInfoData* pColumnInfoData, int32_t startIndex, uint32_t nRows) {
if (!pColumnInfoData->hasNull) {
return false;
}
@ -880,7 +947,6 @@ int32_t dataBlockCompar(const void* p1, const void* p2, const void* param) {
}
static int32_t blockDataAssign(SColumnInfoData* pCols, const SSDataBlock* pDataBlock, const int32_t* index) {
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
SColumnInfoData* pDst = &pCols[i];
@ -1131,6 +1197,7 @@ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo*
if (tmp == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
// memset(tmp, 0, numOfRows * pColumn->info.bytes);
// copy back the existed data
if (pColumn->pData != NULL) {
@ -1474,8 +1541,8 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize, int
int end = nRows;
while (start <= end) {
int mid = start + (end - start) / 2;
//data size + var data type columns offset + fixed data type columns bitmap len
int midSize = rowSize * mid + numVarCols * sizeof(int32_t) * mid + numFixCols * BitmapLen(mid);
// data size + var data type columns offset + fixed data type columns bitmap len
int midSize = rowSize * mid + numVarCols * sizeof(int32_t) * mid + numFixCols * BitmapLen(mid);
if (midSize > payloadSize) {
result = mid;
end = mid - 1;
@ -1669,7 +1736,7 @@ int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock) {
if (pColData->reassigned && IS_VAR_DATA_TYPE(pColData->info.type)) {
for (int32_t row = 0; row < rows; ++row) {
char* pData = pColData->pData + pColData->varmeta.offset[row];
char* pData = pColData->pData + pColData->varmeta.offset[row];
int32_t colSize = 0;
if (pColData->info.type == TSDB_DATA_TYPE_JSON) {
colSize = getJsonValueLen(pData);
@ -1771,8 +1838,8 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
}
// for debug
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) {
int32_t size = 2048*1024;
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf, const char* taskIdStr) {
int32_t size = 2048 * 1024;
*pDataBuf = taosMemoryCalloc(size, 1);
char* dumpBuf = *pDataBuf;
char pBuf[128] = {0};
@ -1780,9 +1847,9 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
int32_t rows = pDataBlock->info.rows;
int32_t len = 0;
len += snprintf(dumpBuf + len, size - len,
"===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%" PRId64
"%s===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%" PRId64
"|rows:%" PRId64 "|version:%" PRIu64 "|cal start:%" PRIu64 "|cal end:%" PRIu64 "|tbl:%s\n",
flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.id.groupId,
taskIdStr, flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.id.groupId,
pDataBlock->info.id.uid, pDataBlock->info.rows, pDataBlock->info.version,
pDataBlock->info.calWin.skey, pDataBlock->info.calWin.ekey, pDataBlock->info.parTbName);
if (len >= size - 1) return dumpBuf;
@ -2156,21 +2223,21 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
data += metaSize;
dataLen += metaSize;
if (pColRes->reassigned && IS_VAR_DATA_TYPE(pColRes->info.type)) {
colSizes[col] = 0;
for (int32_t row = 0; row < numOfRows; ++row) {
char* pColData = pColRes->pData + pColRes->varmeta.offset[row];
int32_t colSize = 0;
if (pColRes->info.type == TSDB_DATA_TYPE_JSON) {
colSize = getJsonValueLen(pColData);
} else {
colSize = varDataTLen(pColData);
}
colSizes[col] += colSize;
dataLen += colSize;
memmove(data, pColData, colSize);
data += colSize;
if (pColRes->reassigned && IS_VAR_DATA_TYPE(pColRes->info.type)) {
colSizes[col] = 0;
for (int32_t row = 0; row < numOfRows; ++row) {
char* pColData = pColRes->pData + pColRes->varmeta.offset[row];
int32_t colSize = 0;
if (pColRes->info.type == TSDB_DATA_TYPE_JSON) {
colSize = getJsonValueLen(pColData);
} else {
colSize = varDataTLen(pColData);
}
colSizes[col] += colSize;
dataLen += colSize;
memmove(data, pColData, colSize);
data += colSize;
}
} else {
colSizes[col] = colDataGetLength(pColRes, numOfRows);
dataLen += colSizes[col];
@ -2181,7 +2248,8 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
}
colSizes[col] = htonl(colSizes[col]);
// uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type, htonl(colSizes[col]), colSizes[col]);
// uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type,
// htonl(colSizes[col]), colSizes[col]);
}
*actualLen = dataLen;
@ -2283,7 +2351,7 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
}
void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList) {
// int32_t totalRows = pBlock->info.rows;
// int32_t totalRows = pBlock->info.rows;
int32_t bmLen = BitmapLen(totalRows);
char* pBitmap = NULL;
int32_t maxRows = 0;
@ -2310,8 +2378,9 @@ void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList
if (colDataIsNull_var(pDst, j)) {
colDataSetNull_var(pDst, numOfRows);
} else {
// fix address sanitizer error. p1 may point to memory that will change during realloc of colDataSetVal, first copy it to p2
char* p1 = colDataGetVarData(pDst, j);
// fix address sanitizer error. p1 may point to memory that will change during realloc of colDataSetVal, first
// copy it to p2
char* p1 = colDataGetVarData(pDst, j);
int32_t len = 0;
if (pDst->info.type == TSDB_DATA_TYPE_JSON) {
len = getJsonValueLen(p1);
@ -2426,4 +2495,4 @@ void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList
int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
return blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock)) + blockDataGetSize(pBlock);
}
}

View File

@ -216,7 +216,11 @@ uint32_t tsCurRange = 100; // range
char tsCompressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
// udf
bool tsStartUdfd = true;
#ifdef WINDOWS
bool tsStartUdfd = false;
#else
bool tsStartUdfd = true;
#endif
// wal
int64_t tsWalFsyncDataSizeLimit = (100 * 1024 * 1024L);
@ -236,6 +240,15 @@ int64_t tsStreamBufferSize = 128 * 1024 * 1024;
int64_t tsCheckpointInterval = 3 * 60 * 60 * 1000;
bool tsFilterScalarMode = false;
int32_t tsKeepTimeOffset = 0; // latency of data migration
int tsResolveFQDNRetryTime = 100; //seconds
char tsS3Endpoint[TSDB_FQDN_LEN] = "<endpoint>";
char tsS3AccessKey[TSDB_FQDN_LEN] = "<accesskey>";
char tsS3AccessKeyId[TSDB_FQDN_LEN] = "<accesskeyid>";
char tsS3AccessKeySecret[TSDB_FQDN_LEN] = "<accesskeysecrect>";
char tsS3BucketName[TSDB_FQDN_LEN] = "<bucketname>";
char tsS3AppId[TSDB_FQDN_LEN] = "<appid>";
int8_t tsS3Enabled = false;
#ifndef _STORAGE
int32_t taosSetTfsCfg(SConfig *pCfg) {
@ -258,7 +271,43 @@ int32_t taosSetTfsCfg(SConfig *pCfg) {
int32_t taosSetTfsCfg(SConfig *pCfg);
#endif
struct SConfig *taosGetCfg() { return tsCfg; }
int32_t taosSetS3Cfg(SConfig *pCfg) {
tstrncpy(tsS3AccessKey, cfgGetItem(pCfg, "s3Accesskey")->str, TSDB_FQDN_LEN);
if (tsS3AccessKey[0] == '<') {
return 0;
}
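// The s3Accesskey item is expected in "<id>:<secret>" form; split it at the colon into the key id and the secret.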
char *colon = strchr(tsS3AccessKey, ':');
if (!colon) {
uError("invalid access key:%s", tsS3AccessKey);
return -1;
}
*colon = '\0';
tstrncpy(tsS3AccessKeyId, tsS3AccessKey, TSDB_FQDN_LEN);
tstrncpy(tsS3AccessKeySecret, colon + 1, TSDB_FQDN_LEN);
tstrncpy(tsS3Endpoint, cfgGetItem(pCfg, "s3Endpoint")->str, TSDB_FQDN_LEN);
tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN);
char *cos = strstr(tsS3Endpoint, "cos.");
if (cos) {
char *appid = strrchr(tsS3BucketName, '-');
if (!appid) {
uError("failed to locate appid in bucket:%s", tsS3BucketName);
return -1;
} else {
tstrncpy(tsS3AppId, appid + 1, TSDB_FQDN_LEN);
}
}
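// Only enable the object-store backend when a real bucket name is configured, more than one disk config exists, and the binary was built with USE_COS.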
if (tsS3BucketName[0] != '<' && tsDiskCfgNum > 1) {
#ifdef USE_COS
tsS3Enabled = true;
#endif
}
return 0;
}
struct SConfig *taosGetCfg() {
return tsCfg;
}
static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile,
char *apolloUrl) {
@ -580,6 +629,11 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "keepTimeOffset", tsKeepTimeOffset, 0, 23, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddInt32(pCfg, "resolveFQDNRetryTime", tsResolveFQDNRetryTime, 1, 10240, 0) != 0) return -1;
if (cfgAddString(pCfg, "s3Accesskey", tsS3AccessKey, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddString(pCfg, "s3Endpoint", tsS3Endpoint, CFG_SCOPE_SERVER) != 0) return -1;
if (cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER) != 0) return -1;
GRANT_CFG_ADD;
return 0;
@ -979,6 +1033,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsKeepTimeOffset = cfgGetItem(pCfg, "keepTimeOffset")->i32;
tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32;
tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32;
tsResolveFQDNRetryTime = cfgGetItem(pCfg, "resolveFQDNRetryTime")->i32;
GRANT_CFG_GET;
return 0;
@ -1502,6 +1557,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile
if (taosSetServerCfg(tsCfg)) return -1;
if (taosSetReleaseCfg(tsCfg)) return -1;
if (taosSetTfsCfg(tsCfg) != 0) return -1;
if (taosSetS3Cfg(tsCfg) != 0) return -1;
}
taosSetSystemCfg(tsCfg);

View File

@ -534,6 +534,7 @@ int32_t tSerializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pReq
if (tEncodeI8(&encoder, pField->flags) < 0) return -1;
if (tEncodeI32(&encoder, pField->bytes) < 0) return -1;
if (tEncodeCStr(&encoder, pField->name) < 0) return -1;
if (tEncodeCStr(&encoder, pField->comment) < 0) return -1;
}
for (int32_t i = 0; i < pReq->numOfTags; ++i) {
@ -542,6 +543,7 @@ int32_t tSerializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pReq
if (tEncodeI8(&encoder, pField->flags) < 0) return -1;
if (tEncodeI32(&encoder, pField->bytes) < 0) return -1;
if (tEncodeCStr(&encoder, pField->name) < 0) return -1;
if (tEncodeCStr(&encoder, pField->comment) < 0) return -1;
}
for (int32_t i = 0; i < pReq->numOfFuncs; ++i) {
@ -608,6 +610,7 @@ int32_t tDeserializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pR
if (tDecodeI8(&decoder, &field.flags) < 0) return -1;
if (tDecodeI32(&decoder, &field.bytes) < 0) return -1;
if (tDecodeCStrTo(&decoder, field.name) < 0) return -1;
if (tDecodeCStrTo(&decoder, field.comment) < 0) return -1;
if (taosArrayPush(pReq->pColumns, &field) == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
@ -620,6 +623,7 @@ int32_t tDeserializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pR
if (tDecodeI8(&decoder, &field.flags) < 0) return -1;
if (tDecodeI32(&decoder, &field.bytes) < 0) return -1;
if (tDecodeCStrTo(&decoder, field.name) < 0) return -1;
if (tDecodeCStrTo(&decoder, field.comment) < 0) return -1;
if (taosArrayPush(pReq->pTags, &field) == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
@ -1078,9 +1082,9 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
if (tEncodeI64(&encoder, pload->compStorage) < 0) return -1;
if (tEncodeI64(&encoder, pload->pointsWritten) < 0) return -1;
if (tEncodeI32(&encoder, pload->numOfCachedTables) < 0) return -1;
if (tEncodeI32(&encoder, reserved) < 0) return -1;
if (tEncodeI64(&encoder, reserved) < 0) return -1;
if (tEncodeI64(&encoder, reserved) < 0) return -1;
if (tEncodeI32(&encoder, pload->learnerProgress) < 0) return -1;
if (tEncodeI64(&encoder, pload->roleTimeMs) < 0) return -1;
if (tEncodeI64(&encoder, pload->startTimeMs) < 0) return -1;
}
// mnode loads
@ -1104,6 +1108,16 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
if (tEncodeI64(&encoder, pReq->mload.syncTerm) < 0) return -1;
if (tEncodeI64(&encoder, pReq->mload.roleTimeMs) < 0) return -1;
if (tEncodeI8(&encoder, pReq->clusterCfg.ttlChangeOnWrite) < 0) return -1;
// vnode extra
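// The trailing per-vnode fields (syncTerm plus reserved slots) sit at the end of the message; the decoder only reads them when bytes remain, keeping compatibility with older peers.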
for (int32_t i = 0; i < vlen; ++i) {
SVnodeLoad *pload = taosArrayGet(pReq->pVloads, i);
int64_t reserved = 0;
if (tEncodeI64(&encoder, pload->syncTerm) < 0) return -1;
if (tEncodeI64(&encoder, reserved) < 0) return -1;
if (tEncodeI64(&encoder, reserved) < 0) return -1;
if (tEncodeI64(&encoder, reserved) < 0) return -1;
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -1148,8 +1162,8 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
for (int32_t i = 0; i < vlen; ++i) {
SVnodeLoad vload = {0};
int64_t reserved64 = 0;
int32_t reserved32 = 0;
vload.syncTerm = -1;
if (tDecodeI32(&decoder, &vload.vgId) < 0) return -1;
if (tDecodeI8(&decoder, &vload.syncState) < 0) return -1;
if (tDecodeI8(&decoder, &vload.syncRestore) < 0) return -1;
@ -1161,15 +1175,16 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
if (tDecodeI64(&decoder, &vload.compStorage) < 0) return -1;
if (tDecodeI64(&decoder, &vload.pointsWritten) < 0) return -1;
if (tDecodeI32(&decoder, &vload.numOfCachedTables) < 0) return -1;
if (tDecodeI32(&decoder, (int32_t *)&reserved32) < 0) return -1;
if (tDecodeI64(&decoder, &reserved64) < 0) return -1;
if (tDecodeI64(&decoder, &reserved64) < 0) return -1;
if (tDecodeI32(&decoder, &vload.learnerProgress) < 0) return -1;
if (tDecodeI64(&decoder, &vload.roleTimeMs) < 0) return -1;
if (tDecodeI64(&decoder, &vload.startTimeMs) < 0) return -1;
if (taosArrayPush(pReq->pVloads, &vload) == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
}
// mnode loads
if (tDecodeI8(&decoder, &pReq->mload.syncState) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->mload.syncRestore) < 0) return -1;
@ -1200,6 +1215,17 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
if (tDecodeI8(&decoder, &pReq->clusterCfg.ttlChangeOnWrite) < 0) return -1;
}
// vnode extra
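// Decode the trailing per-vnode block only when bytes remain, so status messages from older dnodes that lack it still parse.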
if (!tDecodeIsEnd(&decoder)) {
for (int32_t i = 0; i < vlen; ++i) {
SVnodeLoad *pLoad = taosArrayGet(pReq->pVloads, i);
int64_t reserved = 0;
if (tDecodeI64(&decoder, &pLoad->syncTerm) < 0) return -1;
if (tDecodeI64(&decoder, &reserved) < 0) return -1;
if (tDecodeI64(&decoder, &reserved) < 0) return -1;
if (tDecodeI64(&decoder, &reserved) < 0) return -1;
}
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
@ -2301,7 +2327,7 @@ int32_t tDeserializeSTableCfgRsp(void *buf, int32_t bufLen, STableCfgRsp *pRsp)
}
int32_t totalCols = pRsp->numOfTags + pRsp->numOfColumns;
pRsp->pSchemas = taosMemoryMalloc(sizeof(SSchema) * totalCols);
pRsp->pSchemas = taosMemoryCalloc(totalCols, sizeof(SSchema));
if (pRsp->pSchemas == NULL) return -1;
for (int32_t i = 0; i < totalCols; ++i) {
@ -3684,7 +3710,7 @@ static int32_t tDecodeSTableMetaRsp(SDecoder *pDecoder, STableMetaRsp *pRsp) {
int32_t totalCols = pRsp->numOfTags + pRsp->numOfColumns;
if (totalCols > 0) {
pRsp->pSchemas = taosMemoryMalloc(sizeof(SSchema) * totalCols);
pRsp->pSchemas = taosMemoryCalloc(totalCols, sizeof(SSchema));
if (pRsp->pSchemas == NULL) return -1;
for (int32_t i = 0; i < totalCols; ++i) {
@ -4322,6 +4348,7 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR
SReplica *pReplica = &pReq->learnerReplicas[i];
if (tEncodeSReplica(&encoder, pReplica) < 0) return -1;
}
if (tEncodeI32(&encoder, pReq->changeVersion) < 0) return -1;
tEndEncode(&encoder);
@ -4408,6 +4435,9 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *
if (tDecodeSReplica(&decoder, pReplica) < 0) return -1;
}
}
if (!tDecodeIsEnd(&decoder)) {
if (tDecodeI32(&decoder, &pReq->changeVersion) < 0) return -1;
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
@ -4640,6 +4670,7 @@ int32_t tSerializeSAlterVnodeReplicaReq(void *buf, int32_t bufLen, SAlterVnodeRe
SReplica *pReplica = &pReq->learnerReplicas[i];
if (tEncodeSReplica(&encoder, pReplica) < 0) return -1;
}
if (tEncodeI32(&encoder, pReq->changeVersion) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -4671,6 +4702,9 @@ int32_t tDeserializeSAlterVnodeReplicaReq(void *buf, int32_t bufLen, SAlterVnode
if (tDecodeSReplica(&decoder, pReplica) < 0) return -1;
}
}
if (!tDecodeIsEnd(&decoder)){
if (tDecodeI32(&decoder, &pReq->changeVersion) < 0) return -1;
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
@ -4714,7 +4748,8 @@ int32_t tSerializeSAlterVnodeHashRangeReq(void *buf, int32_t bufLen, SAlterVnode
if (tEncodeI32(&encoder, pReq->dstVgId) < 0) return -1;
if (tEncodeI32(&encoder, pReq->hashBegin) < 0) return -1;
if (tEncodeI32(&encoder, pReq->hashEnd) < 0) return -1;
if (tEncodeI64(&encoder, pReq->reserved) < 0) return -1;
if (tEncodeI32(&encoder, pReq->changeVersion) < 0) return -1;
if (tEncodeI32(&encoder, pReq->reserved) < 0) return -1;
tEndEncode(&encoder);
@ -4732,7 +4767,8 @@ int32_t tDeserializeSAlterVnodeHashRangeReq(void *buf, int32_t bufLen, SAlterVno
if (tDecodeI32(&decoder, &pReq->dstVgId) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->hashBegin) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->hashEnd) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->reserved) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->changeVersion) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->reserved) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
@ -5537,6 +5573,82 @@ void tFreeSSubQueryMsg(SSubQueryMsg *pReq) {
taosMemoryFreeClear(pReq->msg);
}
int32_t tSerializeSOperatorParam(SEncoder* pEncoder, SOperatorParam* pOpParam) {
if (tEncodeI32(pEncoder, pOpParam->opType) < 0) return -1;
if (tEncodeI32(pEncoder, pOpParam->downstreamIdx) < 0) return -1;
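// Only table-scan operator parameters are serializable here; any other opType is rejected, and child parameters are encoded recursively after the scan payload.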
switch (pOpParam->opType) {
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: {
STableScanOperatorParam* pScan = (STableScanOperatorParam*)pOpParam->value;
if (tEncodeI8(pEncoder, pScan->tableSeq) < 0) return -1;
int32_t uidNum = taosArrayGetSize(pScan->pUidList);
if (tEncodeI32(pEncoder, uidNum) < 0) return -1;
for (int32_t m = 0; m < uidNum; ++m) {
int64_t* pUid = taosArrayGet(pScan->pUidList, m);
if (tEncodeI64(pEncoder, *pUid) < 0) return -1;
}
break;
}
default:
return TSDB_CODE_INVALID_PARA;
}
int32_t n = taosArrayGetSize(pOpParam->pChildren);
if (tEncodeI32(pEncoder, n) < 0) return -1;
for (int32_t i = 0; i < n; ++i) {
SOperatorParam* pChild = *(SOperatorParam**)taosArrayGet(pOpParam->pChildren, i);
if (tSerializeSOperatorParam(pEncoder, pChild) < 0) return -1;
}
return 0;
}
int32_t tDeserializeSOperatorParam(SDecoder *pDecoder, SOperatorParam* pOpParam) {
if (tDecodeI32(pDecoder, &pOpParam->opType) < 0) return -1;
if (tDecodeI32(pDecoder, &pOpParam->downstreamIdx) < 0) return -1;
switch (pOpParam->opType) {
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN: {
STableScanOperatorParam* pScan = taosMemoryMalloc(sizeof(STableScanOperatorParam));
if (NULL == pScan) return -1;
if (tDecodeI8(pDecoder, (int8_t*)&pScan->tableSeq) < 0) return -1;
int32_t uidNum = 0;
int64_t uid = 0;
if (tDecodeI32(pDecoder, &uidNum) < 0) return -1;
if (uidNum > 0) {
pScan->pUidList = taosArrayInit(uidNum, sizeof(int64_t));
if (NULL == pScan->pUidList) return -1;
for (int32_t m = 0; m < uidNum; ++m) {
if (tDecodeI64(pDecoder, &uid) < 0) return -1;
taosArrayPush(pScan->pUidList, &uid);
}
} else {
pScan->pUidList = NULL;
}
pOpParam->value = pScan;
break;
}
default:
return TSDB_CODE_INVALID_PARA;
}
int32_t childrenNum = 0;
if (tDecodeI32(pDecoder, &childrenNum) < 0) return -1;
if (childrenNum > 0) {
pOpParam->pChildren = taosArrayInit(childrenNum, POINTER_BYTES);
if (NULL == pOpParam->pChildren) return -1;
for (int32_t i = 0; i < childrenNum; ++i) {
SOperatorParam* pChild = taosMemoryCalloc(1, sizeof(SOperatorParam));
if (NULL == pChild) return -1;
if (tDeserializeSOperatorParam(pDecoder, pChild) < 0) return -1;
taosArrayPush(pOpParam->pChildren, &pChild);
}
} else {
pOpParam->pChildren = NULL;
}
return 0;
}
int32_t tSerializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq) {
int32_t headLen = sizeof(SMsgHead);
if (buf != NULL) {
@ -5552,6 +5664,12 @@ int32_t tSerializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq) {
if (tEncodeU64(&encoder, pReq->queryId) < 0) return -1;
if (tEncodeU64(&encoder, pReq->taskId) < 0) return -1;
if (tEncodeI32(&encoder, pReq->execId) < 0) return -1;
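// A one/zero flag records whether an SOperatorParam blob follows, keeping the field optional on the wire.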
if (pReq->pOpParam) {
if (tEncodeI32(&encoder, 1) < 0) return -1;
if (tSerializeSOperatorParam(&encoder, pReq->pOpParam) < 0) return -1;
} else {
if (tEncodeI32(&encoder, 0) < 0) return -1;
}
tEndEncode(&encoder);
@ -5584,6 +5702,14 @@ int32_t tDeserializeSResFetchReq(void *buf, int32_t bufLen, SResFetchReq *pReq)
if (tDecodeU64(&decoder, &pReq->taskId) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->execId) < 0) return -1;
int32_t paramNum = 0;
if (tDecodeI32(&decoder, &paramNum) < 0) return -1;
if (paramNum > 0) {
pReq->pOpParam = taosMemoryMalloc(sizeof(*pReq->pOpParam));
if (NULL == pReq->pOpParam) return -1;
if (tDeserializeSOperatorParam(&decoder, pReq->pOpParam) < 0) return -1;
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
@ -5728,11 +5854,18 @@ int32_t tSerializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pRsp
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pRsp->code) < 0) return -1;
if (tEncodeCStr(&encoder, pRsp->tbFName) < 0) return -1;
if (tEncodeI32(&encoder, pRsp->sversion) < 0) return -1;
if (tEncodeI32(&encoder, pRsp->tversion) < 0) return -1;
if (tEncodeI64(&encoder, pRsp->affectedRows) < 0) return -1;
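// Table version info is encoded as an array: first the entry count, then tbFName/sversion/tversion for each table.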
int32_t tbNum = taosArrayGetSize(pRsp->tbVerInfo);
if (tEncodeI32(&encoder, tbNum) < 0) return -1;
if (tbNum > 0) {
for (int32_t i = 0; i < tbNum; ++i) {
STbVerInfo *pVer = taosArrayGet(pRsp->tbVerInfo, i);
if (tEncodeCStr(&encoder, pVer->tbFName) < 0) return -1;
if (tEncodeI32(&encoder, pVer->sversion) < 0) return -1;
if (tEncodeI32(&encoder, pVer->tversion) < 0) return -1;
}
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -5748,11 +5881,19 @@ int32_t tDeserializeSQueryTableRsp(void *buf, int32_t bufLen, SQueryTableRsp *pR
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pRsp->code) < 0) return -1;
if (tDecodeCStrTo(&decoder, pRsp->tbFName) < 0) return -1;
if (tDecodeI32(&decoder, &pRsp->sversion) < 0) return -1;
if (tDecodeI32(&decoder, &pRsp->tversion) < 0) return -1;
if (tDecodeI64(&decoder, &pRsp->affectedRows) < 0) return -1;
int32_t tbNum = 0;
if (tDecodeI32(&decoder, &tbNum) < 0) return -1;
if (tbNum > 0) {
pRsp->tbVerInfo = taosArrayInit(tbNum, sizeof(STbVerInfo));
if (NULL == pRsp->tbVerInfo) return -1;
STbVerInfo tbVer;
if (tDecodeCStrTo(&decoder, tbVer.tbFName) < 0) return -1;
if (tDecodeI32(&decoder, &tbVer.sversion) < 0) return -1;
if (tDecodeI32(&decoder, &tbVer.tversion) < 0) return -1;
if (NULL == taosArrayPush(pRsp->tbVerInfo, &tbVer)) return -1;
}
tEndDecode(&decoder);
tDecoderClear(&decoder);

View File

@ -46,7 +46,7 @@ static int32_t mmDecodeOption(SJson *pJson, SMnodeOpt *pOption) {
if (code < 0) return -1;
tjsonGetInt32ValueFromDouble(replica, "role", pOption->nodeRoles[i], code);
if (code < 0) return -1;
if(pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER){
if (pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER) {
pOption->numOfReplicas++;
}
}
@ -65,7 +65,7 @@ int32_t mmReadFile(const char *path, SMnodeOpt *pOption) {
char file[PATH_MAX] = {0};
snprintf(file, sizeof(file), "%s%smnode.json", path, TD_DIRSEP);
if (taosStatFile(file, NULL, NULL) < 0) {
if (taosStatFile(file, NULL, NULL, NULL) < 0) {
dInfo("mnode file:%s not exist", file);
return 0;
}

View File

@ -106,6 +106,8 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_DND_CONFIG_DNODE_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_MNODE_TYPE_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_VNODE_TYPE_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_CONFIG_CHANGE_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_CONNECT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_ACCT, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

View File

@ -97,6 +97,7 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessDisableVnodeWriteReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessAlterVnodeTypeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t vmProcessCheckLearnCatchupReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg);
// vmFile.c
int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes);

View File

@ -97,7 +97,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t
SWrapperCfg *pCfgs = NULL;
snprintf(file, sizeof(file), "%s%svnodes.json", pMgmt->path, TD_DIRSEP);
if (taosStatFile(file, NULL, NULL) < 0) {
if (taosStatFile(file, NULL, NULL, NULL) < 0) {
dInfo("vnode file:%s not exist", file);
return 0;
}

View File

@ -133,6 +133,7 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
pCfg->standby = 0;
pCfg->syncCfg.replicaNum = 0;
pCfg->syncCfg.totalReplicaNum = 0;
pCfg->syncCfg.changeVersion = pCreate->changeVersion;
memset(&pCfg->syncCfg.nodeInfo, 0, sizeof(pCfg->syncCfg.nodeInfo));
for (int32_t i = 0; i < pCreate->replica; ++i) {
@ -144,7 +145,7 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
tmsgUpdateDnodeInfo(&pNode->nodeId, &pNode->clusterId, pNode->nodeFqdn, &pNode->nodePort);
pCfg->syncCfg.replicaNum++;
}
if(pCreate->selfIndex != -1){
if (pCreate->selfIndex != -1) {
pCfg->syncCfg.myIndex = pCreate->selfIndex;
}
for (int32_t i = pCfg->syncCfg.replicaNum; i < pCreate->replica + pCreate->learnerReplica; ++i) {
@ -157,7 +158,7 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
pCfg->syncCfg.totalReplicaNum++;
}
pCfg->syncCfg.totalReplicaNum += pCfg->syncCfg.replicaNum;
if(pCreate->learnerSelfIndex != -1){
if (pCreate->learnerSelfIndex != -1) {
pCfg->syncCfg.myIndex = pCreate->replica + pCreate->learnerSelfIndex;
}
}
@ -201,38 +202,38 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return -1;
}
if(req.learnerReplica == 0)
{
if (req.learnerReplica == 0) {
req.learnerSelfIndex = -1;
}
dInfo("vgId:%d, vnode management handle msgType:%s, start to create vnode, page:%d pageSize:%d buffer:%d szPage:%d szBuf:%" PRIu64
", cacheLast:%d cacheLastSize:%d sstTrigger:%d tsdbPageSize:%d %d dbname:%s dbId:%" PRId64
", days:%d keep0:%d keep1:%d keep2:%d tsma:%d precision:%d compression:%d minRows:%d maxRows:%d"
", wal fsync:%d level:%d retentionPeriod:%d retentionSize:%" PRId64 " rollPeriod:%d segSize:%" PRId64
", hash method:%d begin:%u end:%u prefix:%d surfix:%d replica:%d selfIndex:%d "
"learnerReplica:%d learnerSelfIndex:%d strict:%d",
req.vgId, TMSG_INFO(pMsg->msgType), req.pages, req.pageSize, req.buffer, req.pageSize * 1024,
(uint64_t)req.buffer * 1024 * 1024,
req.cacheLast, req.cacheLastSize, req.sstTrigger, req.tsdbPageSize, req.tsdbPageSize * 1024, req.db, req.dbUid,
req.daysPerFile, req.daysToKeep0, req.daysToKeep1, req.daysToKeep2, req.isTsma, req.precision, req.compression,
req.minRows, req.maxRows, req.walFsyncPeriod, req.walLevel, req.walRetentionPeriod, req.walRetentionSize,
req.walRollPeriod, req.walSegmentSize, req.hashMethod, req.hashBegin, req.hashEnd, req.hashPrefix,
req.hashSuffix, req.replica, req.selfIndex, req.learnerReplica, req.learnerSelfIndex, req.strict);
dInfo(
"vgId:%d, vnode management handle msgType:%s, start to create vnode, page:%d pageSize:%d buffer:%d szPage:%d "
"szBuf:%" PRIu64 ", cacheLast:%d cacheLastSize:%d sstTrigger:%d tsdbPageSize:%d %d dbname:%s dbId:%" PRId64
", days:%d keep0:%d keep1:%d keep2:%d tsma:%d precision:%d compression:%d minRows:%d maxRows:%d"
", wal fsync:%d level:%d retentionPeriod:%d retentionSize:%" PRId64 " rollPeriod:%d segSize:%" PRId64
", hash method:%d begin:%u end:%u prefix:%d surfix:%d replica:%d selfIndex:%d "
"learnerReplica:%d learnerSelfIndex:%d strict:%d changeVersion:%d",
req.vgId, TMSG_INFO(pMsg->msgType), req.pages, req.pageSize, req.buffer, req.pageSize * 1024,
(uint64_t)req.buffer * 1024 * 1024, req.cacheLast, req.cacheLastSize, req.sstTrigger, req.tsdbPageSize,
req.tsdbPageSize * 1024, req.db, req.dbUid, req.daysPerFile, req.daysToKeep0, req.daysToKeep1, req.daysToKeep2,
req.isTsma, req.precision, req.compression, req.minRows, req.maxRows, req.walFsyncPeriod, req.walLevel,
req.walRetentionPeriod, req.walRetentionSize, req.walRollPeriod, req.walSegmentSize, req.hashMethod,
req.hashBegin, req.hashEnd, req.hashPrefix, req.hashSuffix, req.replica, req.selfIndex, req.learnerReplica,
req.learnerSelfIndex, req.strict, req.changeVersion);
for (int32_t i = 0; i < req.replica; ++i) {
dInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d", req.vgId, i, req.replicas[i].fqdn, req.replicas[i].port,
req.replicas[i].id);
}
for (int32_t i = 0; i < req.learnerReplica; ++i) {
dInfo("vgId:%d, learnerReplica:%d ep:%s:%u dnode:%d", req.vgId, i, req.learnerReplicas[i].fqdn,
req.learnerReplicas[i].port, req.replicas[i].id);
req.learnerReplicas[i].port, req.replicas[i].id);
}
SReplica *pReplica = NULL;
if(req.selfIndex != -1){
if (req.selfIndex != -1) {
pReplica = &req.replicas[req.selfIndex];
}
else{
} else {
pReplica = &req.learnerReplicas[req.learnerSelfIndex];
}
if (pReplica->id != pMgmt->pData->dnodeId || pReplica->port != tsServerPort ||
@ -313,10 +314,10 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
_OVER:
if (code != 0) {
vnodeClose(pImpl);
vnodeDestroy(path, pMgmt->pTfs);
vnodeDestroy(0, path, pMgmt->pTfs);
} else {
dInfo("vgId:%d, vnode management handle msgType:%s, end to create vnode, vnode is created",
req.vgId, TMSG_INFO(pMsg->msgType));
dInfo("vgId:%d, vnode management handle msgType:%s, end to create vnode, vnode is created", req.vgId,
TMSG_INFO(pMsg->msgType));
}
tFreeSCreateVnodeReq(&req);
@ -324,6 +325,7 @@ _OVER:
return code;
}
//alter replica doesn't use this, but restore dnode still use this
int32_t vmProcessAlterVnodeTypeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
SAlterVnodeTypeReq req = {0};
if (tDeserializeSAlterVnodeReplicaReq(pMsg->pCont, pMsg->contLen, &req) != 0) {
@ -331,11 +333,128 @@ int32_t vmProcessAlterVnodeTypeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return -1;
}
if (req.learnerReplica == 0) {
req.learnerSelfIndex = -1;
}
dInfo("vgId:%d, vnode management handle msgType:%s, start to process alter-node-type-request", req.vgId,
TMSG_INFO(pMsg->msgType));
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, req.vgId);
if (pVnode == NULL) {
dError("vgId:%d, failed to alter vnode type since %s", req.vgId, terrstr());
terrno = TSDB_CODE_VND_NOT_EXIST;
return -1;
}
ESyncRole role = vnodeGetRole(pVnode->pImpl);
dInfo("vgId:%d, checking node role:%d", req.vgId, role);
if (role == TAOS_SYNC_ROLE_VOTER) {
dError("vgId:%d, failed to alter vnode type since node already is role:%d", req.vgId, role);
terrno = TSDB_CODE_VND_ALREADY_IS_VOTER;
vmReleaseVnode(pMgmt, pVnode);
return -1;
}
dInfo("vgId:%d, checking node catch up", req.vgId);
if (vnodeIsCatchUp(pVnode->pImpl) != 1) {
terrno = TSDB_CODE_VND_NOT_CATCH_UP;
vmReleaseVnode(pMgmt, pVnode);
return -1;
}
dInfo("node:%s, catched up leader, continue to process alter-node-type-request", pMgmt->name);
int32_t vgId = req.vgId;
dInfo("vgId:%d, start to alter vnode type replica:%d selfIndex:%d strict:%d changeVersion:%d",
vgId, req.replica, req.selfIndex, req.strict, req.changeVersion);
for (int32_t i = 0; i < req.replica; ++i) {
SReplica *pReplica = &req.replicas[i];
dInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d", vgId, i, pReplica->fqdn, pReplica->port, pReplica->id);
}
for (int32_t i = 0; i < req.learnerReplica; ++i) {
SReplica *pReplica = &req.learnerReplicas[i];
dInfo("vgId:%d, learnerReplicas:%d ep:%s:%u dnode:%d", vgId, i, pReplica->fqdn, pReplica->port, pReplica->id);
}
if (req.replica <= 0 || (req.selfIndex < 0 && req.learnerSelfIndex < 0) || req.selfIndex >= req.replica ||
req.learnerSelfIndex >= req.learnerReplica) {
terrno = TSDB_CODE_INVALID_MSG;
dError("vgId:%d, failed to alter replica since invalid msg", vgId);
vmReleaseVnode(pMgmt, pVnode);
return -1;
}
SReplica *pReplica = NULL;
if (req.selfIndex != -1) {
pReplica = &req.replicas[req.selfIndex];
} else {
pReplica = &req.learnerReplicas[req.learnerSelfIndex];
}
if (pReplica->id != pMgmt->pData->dnodeId || pReplica->port != tsServerPort ||
strcmp(pReplica->fqdn, tsLocalFqdn) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
dError("vgId:%d, dnodeId:%d ep:%s:%u not matched with local dnode", vgId, pReplica->id, pReplica->fqdn,
pReplica->port);
vmReleaseVnode(pMgmt, pVnode);
return -1;
}
dInfo("vgId:%d, start to close vnode", vgId);
SWrapperCfg wrapperCfg = {
.dropped = pVnode->dropped,
.vgId = pVnode->vgId,
.vgVersion = pVnode->vgVersion,
.diskPrimary = pVnode->diskPrimary,
};
tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path));
vmCloseVnode(pMgmt, pVnode, false);
int32_t diskPrimary = wrapperCfg.diskPrimary;
char path[TSDB_FILENAME_LEN] = {0};
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vgId);
dInfo("vgId:%d, start to alter vnode replica at %s", vgId, path);
if (vnodeAlterReplica(path, &req, diskPrimary, pMgmt->pTfs) < 0) {
dError("vgId:%d, failed to alter vnode at %s since %s", vgId, path, terrstr());
return -1;
}
dInfo("vgId:%d, begin to open vnode", vgId);
SVnode *pImpl = vnodeOpen(path, diskPrimary, pMgmt->pTfs, pMgmt->msgCb);
if (pImpl == NULL) {
dError("vgId:%d, failed to open vnode at %s since %s", vgId, path, terrstr());
return -1;
}
if (vmOpenVnode(pMgmt, &wrapperCfg, pImpl) != 0) {
dError("vgId:%d, failed to open vnode mgmt since %s", vgId, terrstr());
return -1;
}
if (vnodeStart(pImpl) != 0) {
dError("vgId:%d, failed to start sync since %s", vgId, terrstr());
return -1;
}
dInfo("vgId:%d, vnode management handle msgType:%s, end to process alter-node-type-request, vnode config is altered",
req.vgId, TMSG_INFO(pMsg->msgType));
return 0;
}
int32_t vmProcessCheckLearnCatchupReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
SCheckLearnCatchupReq req = {0};
if (tDeserializeSAlterVnodeReplicaReq(pMsg->pCont, pMsg->contLen, &req) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
return -1;
}
if(req.learnerReplica == 0){
req.learnerSelfIndex = -1;
}
dInfo("vgId:%d, vnode management handle msgType:%s, start to process alter-node-type-request",
dInfo("vgId:%d, vnode management handle msgType:%s, start to process check-learner-catchup-request",
req.vgId, TMSG_INFO(pMsg->msgType));
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, req.vgId);
@ -363,83 +482,11 @@ int32_t vmProcessAlterVnodeTypeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
dInfo("node:%s, catched up leader, continue to process alter-node-type-request", pMgmt->name);
int32_t vgId = req.vgId;
dInfo("vgId:%d, start to alter vnode type replica:%d selfIndex:%d strict:%d", vgId, req.replica, req.selfIndex,
req.strict);
for (int32_t i = 0; i < req.replica; ++i) {
SReplica *pReplica = &req.replicas[i];
dInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d", vgId, i, pReplica->fqdn, pReplica->port, pReplica->id);
}
for (int32_t i = 0; i < req.learnerReplica; ++i) {
SReplica *pReplica = &req.learnerReplicas[i];
dInfo("vgId:%d, learnerReplicas:%d ep:%s:%u dnode:%d", vgId, i, pReplica->fqdn, pReplica->port, pReplica->id);
}
vmReleaseVnode(pMgmt, pVnode);
if (req.replica <= 0 ||
(req.selfIndex < 0 && req.learnerSelfIndex <0)||
req.selfIndex >= req.replica || req.learnerSelfIndex >= req.learnerReplica) {
terrno = TSDB_CODE_INVALID_MSG;
dError("vgId:%d, failed to alter replica since invalid msg", vgId);
vmReleaseVnode(pMgmt, pVnode);
return -1;
}
SReplica *pReplica = NULL;
if(req.selfIndex != -1){
pReplica = &req.replicas[req.selfIndex];
}
else{
pReplica = &req.learnerReplicas[req.learnerSelfIndex];
}
if (pReplica->id != pMgmt->pData->dnodeId || pReplica->port != tsServerPort ||
strcmp(pReplica->fqdn, tsLocalFqdn) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
dError("vgId:%d, dnodeId:%d ep:%s:%u not matched with local dnode", vgId, pReplica->id, pReplica->fqdn,
pReplica->port);
vmReleaseVnode(pMgmt, pVnode);
return -1;
}
dInfo("vgId:%d, start to close vnode", vgId);
SWrapperCfg wrapperCfg = {
.dropped = pVnode->dropped,
.vgId = pVnode->vgId,
.vgVersion = pVnode->vgVersion,
.diskPrimary = pVnode->diskPrimary,
};
tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path));
vmCloseVnode(pMgmt, pVnode, false);
int32_t diskPrimary = wrapperCfg.diskPrimary;
char path[TSDB_FILENAME_LEN] = {0};
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vgId);
dInfo("vgId:%d, start to alter vnode replica at %s", vgId, path);
if (vnodeAlterReplica(path, &req, diskPrimary, pMgmt->pTfs) < 0) {
dError("vgId:%d, failed to alter vnode at %s since %s", vgId, path, terrstr());
return -1;
}
dInfo("vgId:%d, begin to open vnode", vgId);
SVnode *pImpl = vnodeOpen(path, diskPrimary, pMgmt->pTfs, pMgmt->msgCb);
if (pImpl == NULL) {
dError("vgId:%d, failed to open vnode at %s since %s", vgId, path, terrstr());
return -1;
}
if (vmOpenVnode(pMgmt, &wrapperCfg, pImpl) != 0) {
dError("vgId:%d, failed to open vnode mgmt since %s", vgId, terrstr());
return -1;
}
if (vnodeStart(pImpl) != 0) {
dError("vgId:%d, failed to start sync since %s", vgId, terrstr());
return -1;
}
dInfo("vgId:%d, vnode management handle msgType:%s, end to process alter-node-type-request, vnode config is altered",
dInfo("vgId:%d, vnode management handle msgType:%s, end to process check-learner-catchup-request",
req.vgId, TMSG_INFO(pMsg->msgType));
return 0;
}
@ -510,8 +557,8 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
vmCloseVnode(pMgmt, pVnode, true);
int32_t diskPrimary = wrapperCfg.diskPrimary;
char srcPath[TSDB_FILENAME_LEN] = {0};
char dstPath[TSDB_FILENAME_LEN] = {0};
char srcPath[TSDB_FILENAME_LEN] = {0};
char dstPath[TSDB_FILENAME_LEN] = {0};
snprintf(srcPath, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, srcVgId);
snprintf(dstPath, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, dstVgId);
@ -523,6 +570,7 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
dInfo("vgId:%d, open vnode", dstVgId);
SVnode *pImpl = vnodeOpen(dstPath, diskPrimary, pMgmt->pTfs, pMgmt->msgCb);
if (pImpl == NULL) {
dError("vgId:%d, failed to open vnode at %s since %s", dstVgId, dstPath, terrstr());
return -1;
@ -555,15 +603,17 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return -1;
}
if(alterReq.learnerReplica == 0){
if (alterReq.learnerReplica == 0) {
alterReq.learnerSelfIndex = -1;
}
int32_t vgId = alterReq.vgId;
dInfo("vgId:%d,vnode management handle msgType:%s, start to alter vnode replica:%d selfIndex:%d leanerReplica:%d "
"learnerSelfIndex:%d strict:%d",
vgId, TMSG_INFO(pMsg->msgType), alterReq.replica, alterReq.selfIndex, alterReq.learnerReplica,
alterReq.learnerSelfIndex, alterReq.strict);
dInfo(
"vgId:%d,vnode management handle msgType:%s, start to alter vnode replica:%d selfIndex:%d leanerReplica:%d "
"learnerSelfIndex:%d strict:%d changeVersion:%d",
vgId, TMSG_INFO(pMsg->msgType), alterReq.replica, alterReq.selfIndex, alterReq.learnerReplica,
alterReq.learnerSelfIndex, alterReq.strict, alterReq.changeVersion);
for (int32_t i = 0; i < alterReq.replica; ++i) {
SReplica *pReplica = &alterReq.replicas[i];
dInfo("vgId:%d, replica:%d ep:%s:%u dnode:%d", vgId, i, pReplica->fqdn, pReplica->port, pReplica->port);
@ -573,8 +623,7 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
dInfo("vgId:%d, learnerReplicas:%d ep:%s:%u dnode:%d", vgId, i, pReplica->fqdn, pReplica->port, pReplica->port);
}
if (alterReq.replica <= 0 ||
(alterReq.selfIndex < 0 && alterReq.learnerSelfIndex <0)||
if (alterReq.replica <= 0 || (alterReq.selfIndex < 0 && alterReq.learnerSelfIndex < 0) ||
alterReq.selfIndex >= alterReq.replica || alterReq.learnerSelfIndex >= alterReq.learnerReplica) {
terrno = TSDB_CODE_INVALID_MSG;
dError("vgId:%d, failed to alter replica since invalid msg", vgId);
@ -582,10 +631,9 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
}
SReplica *pReplica = NULL;
if(alterReq.selfIndex != -1){
if (alterReq.selfIndex != -1) {
pReplica = &alterReq.replicas[alterReq.selfIndex];
}
else{
} else {
pReplica = &alterReq.learnerReplicas[alterReq.learnerSelfIndex];
}
@ -615,7 +663,7 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
vmCloseVnode(pMgmt, pVnode, false);
int32_t diskPrimary = wrapperCfg.diskPrimary;
char path[TSDB_FILENAME_LEN] = {0};
char path[TSDB_FILENAME_LEN] = {0};
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vgId);
dInfo("vgId:%d, start to alter vnode replica at %s", vgId, path);
@ -641,10 +689,11 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return -1;
}
dInfo("vgId:%d, vnode management handle msgType:%s, end to alter vnode replica:%d selfIndex:%d leanerReplica:%d "
"learnerSelfIndex:%d strict:%d",
vgId, TMSG_INFO(pMsg->msgType), alterReq.replica, alterReq.selfIndex, alterReq.learnerReplica,
alterReq.learnerSelfIndex, alterReq.strict);
dInfo(
"vgId:%d, vnode management handle msgType:%s, end to alter vnode replica:%d selfIndex:%d leanerReplica:%d "
"learnerSelfIndex:%d strict:%d",
vgId, TMSG_INFO(pMsg->msgType), alterReq.replica, alterReq.selfIndex, alterReq.learnerReplica,
alterReq.learnerSelfIndex, alterReq.strict);
return 0;
}
@ -742,7 +791,6 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_SCAN_HISTORY_FINISH, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_SCAN_HISTORY_FINISH_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TRANSFER_STATE, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECK, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECK_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
@ -759,6 +807,8 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_VNODE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_ALTER_VNODE_TYPE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_CONFIG_CHANGE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_TIMEOUT_ELECTION, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SYNC_CLIENT_REQUEST, vmPutMsgToSyncQueue, 0) == NULL) goto _OVER;

View File

@ -208,7 +208,7 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal)
if (pVnode->dropped) {
dInfo("vgId:%d, vnode is destroyed, dropped:%d", pVnode->vgId, pVnode->dropped);
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, pVnode->vgId);
vnodeDestroy(path, pMgmt->pTfs);
vnodeDestroy(pVnode->vgId, path, pMgmt->pTfs);
}
taosMemoryFree(pVnode->path);
@ -267,6 +267,7 @@ static void *vmOpenVnodeInThread(void *param) {
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, pCfg->vgId);
SVnode *pImpl = vnodeOpen(path, diskPrimary, pMgmt->pTfs, pMgmt->msgCb);
if (pImpl == NULL) {
dError("vgId:%d, failed to open vnode by thread:%d since %s", pCfg->vgId, pThread->threadIndex, terrstr());
pThread->failed++;

View File

@ -53,6 +53,9 @@ static void vmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
case TDMT_DND_ALTER_VNODE_TYPE:
code = vmProcessAlterVnodeTypeReq(pMgmt, pMsg);
break;
case TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP:
code = vmProcessCheckLearnCatchupReq(pMgmt, pMsg);
break;
default:
terrno = TSDB_CODE_MSG_NOT_PROCESSED;
dGError("msg:%p, not processed in vnode-mgmt queue", pMsg);

View File

@ -100,7 +100,7 @@ int32_t dmReadEps(SDnodeData *pData) {
goto _OVER;
}
if (taosStatFile(file, NULL, NULL) < 0) {
if (taosStatFile(file, NULL, NULL, NULL) < 0) {
dInfo("dnode file:%s not exist", file);
code = 0;
goto _OVER;
@ -350,7 +350,7 @@ void dmRotateMnodeEpSet(SDnodeData *pData) {
}
void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet) {
if(!pData->validMnodeEps) return;
if (!pData->validMnodeEps) return;
dmGetMnodeEpSet(pData, pEpSet);
dTrace("msg is redirected, handle:%p num:%d use:%d", pMsg->info.handle, pEpSet->numOfEps, pEpSet->inUse);
for (int32_t i = 0; i < pEpSet->numOfEps; ++i) {
@ -469,7 +469,7 @@ static int32_t dmReadDnodePairs(SDnodeData *pData) {
char file[PATH_MAX] = {0};
snprintf(file, sizeof(file), "%s%sdnode%sep.json", tsDataDir, TD_DIRSEP, TD_DIRSEP);
if (taosStatFile(file, NULL, NULL) < 0) {
if (taosStatFile(file, NULL, NULL, NULL) < 0) {
dDebug("dnode file:%s not exist", file);
code = 0;
goto _OVER;

View File

@ -38,7 +38,7 @@ int32_t dmReadFile(const char *path, const char *name, bool *pDeployed) {
char file[PATH_MAX] = {0};
snprintf(file, sizeof(file), "%s%s%s.json", path, TD_DIRSEP, name);
if (taosStatFile(file, NULL, NULL) < 0) {
if (taosStatFile(file, NULL, NULL, NULL) < 0) {
dInfo("file:%s not exist", file);
code = 0;
goto _OVER;

View File

@ -347,9 +347,13 @@ typedef struct {
typedef struct {
int32_t dnodeId;
ESyncState syncState;
int64_t syncTerm;
bool syncRestore;
bool syncCanRead;
int64_t roleTimeMs;
int64_t startTimeMs;
ESyncRole nodeRole;
int32_t learnerProgress;
} SVnodeGid;
typedef struct {
@ -373,6 +377,7 @@ typedef struct {
SVnodeGid vnodeGid[TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA];
void* pTsma;
int32_t numOfCachedTables;
int32_t syncConfChangeVer;
} SVgObj;
typedef struct {

View File

@ -66,11 +66,13 @@ void mndReleaseTrans(SMnode *pMnode, STrans *pTrans);
STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict, const SRpcMsg *pReq,
const char *opername);
void mndTransDrop(STrans *pTrans);
int32_t mndTransAppendPrepareLog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendRedolog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendUndolog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendNullLog(STrans *pTrans);
int32_t mndTransAppendPrepareAction(STrans *pTrans, STransAction *pAction);
int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction);
int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);


@ -37,7 +37,7 @@ int64_t mndGetVgroupMemory(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup);
SArray *mndBuildDnodesArray(SMnode *, int32_t exceptDnodeId);
int32_t mndAllocSmaVgroup(SMnode *, SDbObj *pDb, SVgObj *pVgroup);
int32_t mndAllocVgroup(SMnode *, SDbObj *pDb, SVgObj **ppVgroups);
int32_t mndAddPrepareNewVgAction(SMnode *, STrans *pTrans, SVgObj *pVg);
int32_t mndAddNewVgPrepareAction(SMnode *, STrans *pTrans, SVgObj *pVg);
int32_t mndAddCreateVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SVnodeGid *pVgid);
int32_t mndAddAlterVnodeConfirmAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup);
int32_t mndAddAlterVnodeAction(SMnode *, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, tmsg_t msgType);
@ -47,6 +47,8 @@ int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb
SArray *pArray);
int32_t mndBuildCompactVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int64_t compactTs,
STimeWindow tw);
int32_t mndBuildRaftAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb, SDbObj *pNewDb, SVgObj *pVgroup,
SArray *pArray);
void *mndBuildCreateVnodeReq(SMnode *, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen);
void *mndBuildDropVnodeReq(SMnode *, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen);


@ -37,6 +37,8 @@ static SSdbRow *mndDbActionDecode(SSdbRaw *pRaw);
static int32_t mndDbActionInsert(SSdb *pSdb, SDbObj *pDb);
static int32_t mndDbActionDelete(SSdb *pSdb, SDbObj *pDb);
static int32_t mndDbActionUpdate(SSdb *pSdb, SDbObj *pOld, SDbObj *pNew);
static int32_t mndNewDbActionValidate(SMnode *pMnode, STrans *pTrans, void *pObj);
static int32_t mndProcessCreateDbReq(SRpcMsg *pReq);
static int32_t mndProcessAlterDbReq(SRpcMsg *pReq);
static int32_t mndProcessDropDbReq(SRpcMsg *pReq);
@ -59,6 +61,7 @@ int32_t mndInitDb(SMnode *pMnode) {
.insertFp = (SdbInsertFp)mndDbActionInsert,
.updateFp = (SdbUpdateFp)mndDbActionUpdate,
.deleteFp = (SdbDeleteFp)mndDbActionDelete,
.validateFp = (SdbValidateFp)mndNewDbActionValidate,
};
mndSetMsgHandle(pMnode, TDMT_MND_CREATE_DB, mndProcessCreateDbReq);
@ -247,6 +250,19 @@ _OVER:
return pRow;
}
static int32_t mndNewDbActionValidate(SMnode *pMnode, STrans *pTrans, void *pObj) {
SDbObj *pNewDb = pObj;
SDbObj *pOldDb = sdbAcquire(pMnode->pSdb, SDB_DB, pNewDb->name);
if (pOldDb != NULL) {
mError("trans:%d, db name already in use. name: %s", pTrans->id, pNewDb->name);
sdbRelease(pMnode->pSdb, pOldDb);
return -1;
}
return 0;
}
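
A minimal sketch of the dispatch path this validate callback plugs into, condensed from the mndSync.c hunk later in this diff (sdbValidateRaw is a hypothetical wrapper name; decodeFps and validateFps are assumed to be registered for the raw type):

static int32_t sdbValidateRaw(SMnode *pMnode, STrans *pTrans, SSdbRaw *pRaw) {
  SSdb    *pSdb = pMnode->pSdb;
  SSdbRow *pRow = (pSdb->decodeFps[pRaw->type])(pRaw);   // decode the raw record into a row object
  if (pRow == NULL) return -1;
  void   *pObj = sdbGetRowObj(pRow);
  int32_t code = 0;
  SdbValidateFp validateFp = pSdb->validateFps[pRaw->type];
  if (pObj != NULL && validateFp != NULL) {
    code = validateFp(pMnode, pTrans, pObj);             // e.g. mndNewDbActionValidate above
  }
  taosMemoryFreeClear(pRow);
  return code;
}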
static int32_t mndDbActionInsert(SSdb *pSdb, SDbObj *pDb) {
mTrace("db:%s, perform insert action, row:%p", pDb->name, pDb);
return 0;
@ -448,9 +464,18 @@ static void mndSetDefaultDbCfg(SDbCfg *pCfg) {
if (pCfg->tsdbPageSize <= 0) pCfg->tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE;
}
static int32_t mndSetPrepareNewVgActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) {
static int32_t mndSetCreateDbPrepareAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb) {
SSdbRaw *pDbRaw = mndDbActionEncode(pDb);
if (pDbRaw == NULL) return -1;
if (mndTransAppendPrepareLog(pTrans, pDbRaw) != 0) return -1;
if (sdbSetRawStatus(pDbRaw, SDB_STATUS_CREATING) != 0) return -1;
return 0;
}
static int32_t mndSetNewVgPrepareActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) {
for (int32_t v = 0; v < pDb->cfg.numOfVgroups; ++v) {
if (mndAddPrepareNewVgAction(pMnode, pTrans, (pVgroups + v)) != 0) return -1;
if (mndAddNewVgPrepareAction(pMnode, pTrans, (pVgroups + v)) != 0) return -1;
}
return 0;
}
@ -459,7 +484,7 @@ static int32_t mndSetCreateDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pD
SSdbRaw *pDbRaw = mndDbActionEncode(pDb);
if (pDbRaw == NULL) return -1;
if (mndTransAppendRedolog(pTrans, pDbRaw) != 0) return -1;
if (sdbSetRawStatus(pDbRaw, SDB_STATUS_CREATING) != 0) return -1;
if (sdbSetRawStatus(pDbRaw, SDB_STATUS_UPDATE) != 0) return -1;
for (int32_t v = 0; v < pDb->cfg.numOfVgroups; ++v) {
SSdbRaw *pVgRaw = mndVgroupActionEncode(pVgroups + v);
@ -633,11 +658,11 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
if (mndTransCheckConflict(pMnode, pTrans) != 0) goto _OVER;
mndTransSetOper(pTrans, MND_OPER_CREATE_DB);
if (mndSetPrepareNewVgActions(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbRedoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbPrepareAction(pMnode, pTrans, &dbObj) != 0) goto _OVER;
if (mndSetCreateDbRedoActions(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetNewVgPrepareActions(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbUndoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbCommitLogs(pMnode, pTrans, &dbObj, pVgroups, pNewUserDuped) != 0) goto _OVER;
if (mndSetCreateDbRedoActions(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbUndoActions(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
@ -666,7 +691,12 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) {
terrno = TSDB_CODE_INVALID_MSG;
goto _OVER;
}
#ifdef WINDOWS
if (taosArrayGetSize(createReq.pRetensions) > 0) {
terrno = TSDB_CODE_MND_INVALID_PLATFORM;
goto _OVER;
}
#endif
mInfo("db:%s, start to create, vgroups:%d", createReq.db, createReq.numOfVgroups);
if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_DB, NULL) != 0) {
goto _OVER;
@ -852,7 +882,7 @@ static int32_t mndSetAlterDbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *
if (pIter == NULL) break;
if (mndVgroupInDb(pVgroup, pNewDb->uid)) {
if (mndBuildAlterVgroupAction(pMnode, pTrans, pOldDb, pNewDb, pVgroup, pArray) != 0) {
if (mndBuildRaftAlterVgroupAction(pMnode, pTrans, pOldDb, pNewDb, pVgroup, pArray) != 0) {
sdbCancelFetch(pSdb, pIter);
sdbRelease(pSdb, pVgroup);
taosArrayDestroy(pArray);


@ -424,6 +424,47 @@ static int32_t mndCheckClusterCfgPara(SMnode *pMnode, SDnodeObj *pDnode, const S
return 0;
}
static bool mndUpdateVnodeState(int32_t vgId, SVnodeGid *pGid, SVnodeLoad *pVload) {
bool stateChanged = false;
bool roleChanged = pGid->syncState != pVload->syncState ||
(pVload->syncTerm != -1 && pGid->syncTerm != pVload->syncTerm) ||
pGid->roleTimeMs != pVload->roleTimeMs;
if (roleChanged || pGid->syncRestore != pVload->syncRestore || pGid->syncCanRead != pVload->syncCanRead ||
pGid->startTimeMs != pVload->startTimeMs) {
mInfo(
"vgId:%d, state changed by status msg, old state:%s restored:%d canRead:%d new state:%s restored:%d "
"canRead:%d, dnode:%d",
vgId, syncStr(pGid->syncState), pGid->syncRestore, pGid->syncCanRead, syncStr(pVload->syncState),
pVload->syncRestore, pVload->syncCanRead, pGid->dnodeId);
pGid->syncState = pVload->syncState;
pGid->syncTerm = pVload->syncTerm;
pGid->syncRestore = pVload->syncRestore;
pGid->syncCanRead = pVload->syncCanRead;
pGid->startTimeMs = pVload->startTimeMs;
pGid->roleTimeMs = pVload->roleTimeMs;
stateChanged = true;
}
return stateChanged;
}
static bool mndUpdateMnodeState(SMnodeObj *pObj, SMnodeLoad *pMload) {
bool stateChanged = false;
bool roleChanged = pObj->syncState != pMload->syncState ||
(pMload->syncTerm != -1 && pObj->syncTerm != pMload->syncTerm) ||
pObj->roleTimeMs != pMload->roleTimeMs;
if (roleChanged || pObj->syncRestore != pMload->syncRestore) {
mInfo("dnode:%d, mnode syncState from %s to %s, restoreState from %d to %d, syncTerm from %" PRId64 " to %" PRId64,
pObj->id, syncStr(pObj->syncState), syncStr(pMload->syncState), pObj->syncRestore, pMload->syncRestore,
pObj->syncTerm, pMload->syncTerm);
pObj->syncState = pMload->syncState;
pObj->syncTerm = pMload->syncTerm;
pObj->syncRestore = pMload->syncRestore;
pObj->roleTimeMs = pMload->roleTimeMs;
stateChanged = true;
}
return stateChanged;
}
static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SStatusReq statusReq = {0};
@ -496,26 +537,21 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
pVgroup->compStorage = pVload->compStorage;
pVgroup->pointsWritten = pVload->pointsWritten;
}
bool roleChanged = false;
bool stateChanged = false;
for (int32_t vg = 0; vg < pVgroup->replica; ++vg) {
SVnodeGid *pGid = &pVgroup->vnodeGid[vg];
if (pGid->dnodeId == statusReq.dnodeId) {
if (pGid->syncState != pVload->syncState || pGid->syncRestore != pVload->syncRestore ||
pGid->syncCanRead != pVload->syncCanRead) {
mInfo(
"vgId:%d, state changed by status msg, old state:%s restored:%d canRead:%d new state:%s restored:%d "
"canRead:%d, dnode:%d",
pVgroup->vgId, syncStr(pGid->syncState), pGid->syncRestore, pGid->syncCanRead,
syncStr(pVload->syncState), pVload->syncRestore, pVload->syncCanRead, pDnode->id);
pGid->syncState = pVload->syncState;
pGid->syncRestore = pVload->syncRestore;
pGid->syncCanRead = pVload->syncCanRead;
roleChanged = true;
if (pVload->startTimeMs == 0) {
pVload->startTimeMs = statusReq.rebootTime;
}
if (pVload->roleTimeMs == 0) {
pVload->roleTimeMs = statusReq.rebootTime;
}
stateChanged = mndUpdateVnodeState(pVgroup->vgId, pGid, pVload);
break;
}
}
if (roleChanged) {
if (stateChanged) {
SDbObj *pDb = mndAcquireDb(pMnode, pVgroup->dbName);
if (pDb != NULL && pDb->stateTs != curMs) {
mInfo("db:%s, stateTs changed by status msg, old stateTs:%" PRId64 " new stateTs:%" PRId64, pDb->name,
@ -531,23 +567,10 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
SMnodeObj *pObj = mndAcquireMnode(pMnode, pDnode->id);
if (pObj != NULL) {
bool roleChanged = pObj->syncState != statusReq.mload.syncState ||
(statusReq.mload.syncTerm != -1 && pObj->syncTerm != statusReq.mload.syncTerm);
bool restoreChanged = pObj->syncRestore != statusReq.mload.syncRestore;
if (roleChanged || restoreChanged) {
mInfo("dnode:%d, mnode syncState from %s to %s, restoreState from %d to %d, syncTerm from %" PRId64
" to %" PRId64,
pObj->id, syncStr(pObj->syncState), syncStr(statusReq.mload.syncState), pObj->syncRestore,
statusReq.mload.syncRestore, pObj->syncTerm, statusReq.mload.syncTerm);
pObj->syncState = statusReq.mload.syncState;
pObj->syncRestore = statusReq.mload.syncRestore;
pObj->syncTerm = statusReq.mload.syncTerm;
if (statusReq.mload.roleTimeMs == 0) {
statusReq.mload.roleTimeMs = statusReq.rebootTime;
}
if (roleChanged) {
pObj->roleTimeMs = (statusReq.mload.roleTimeMs != 0) ? statusReq.mload.roleTimeMs : taosGetTimestampMs();
}
mndUpdateMnodeState(pObj, &statusReq.mload);
mndReleaseMnode(pMnode, pObj);
}


@ -359,7 +359,10 @@ static int32_t mndProcessCreateFuncReq(SRpcMsg *pReq) {
terrno = TSDB_CODE_INVALID_MSG;
goto _OVER;
}
#ifdef WINDOWS
terrno = TSDB_CODE_MND_INVALID_PLATFORM;
goto _OVER;
#endif
mInfo("func:%s, start to create, size:%d", createReq.name, createReq.codeLen);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_FUNC) != 0) {
goto _OVER;


@ -79,9 +79,12 @@ int32_t mndInitIdx(SMnode *pMnode) {
return sdbSetTable(pMnode->pSdb, table);
}
static int32_t mndFindSuperTableTagId(const SStbObj *pStb, const char *tagName) {
static int32_t mndFindSuperTableTagId(const SStbObj *pStb, const char *tagName, int8_t *hasIdx) {
for (int32_t tag = 0; tag < pStb->numOfTags; tag++) {
if (strcasecmp(pStb->pTags[tag].name, tagName) == 0) {
if (IS_IDX_ON(&pStb->pTags[tag])) {
*hasIdx = 1;
}
return tag;
}
}
@ -597,7 +600,8 @@ static int32_t mndSetUpdateIdxStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb
pNew->updateTime = taosGetTimestampMs();
pNew->lock = 0;
int32_t tag = mndFindSuperTableTagId(pOld, tagName);
int8_t hasIdx = 0;
int32_t tag = mndFindSuperTableTagId(pOld, tagName, &hasIdx);
if (tag < 0) {
terrno = TSDB_CODE_MND_TAG_NOT_EXIST;
return -1;
@ -612,14 +616,14 @@ static int32_t mndSetUpdateIdxStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb
SSchema *pTag = pNew->pTags + tag;
if (on == 1) {
if (IS_IDX_ON(pTag)) {
if (hasIdx && tag != 0) {
terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;
return -1;
} else {
SSCHMEA_SET_IDX_ON(pTag);
}
} else {
if (!IS_IDX_ON(pTag)) {
if (hasIdx == 0) {
terrno = TSDB_CODE_MND_SMA_NOT_EXIST;
} else {
SSCHMEA_SET_IDX_OFF(pTag);
@ -667,7 +671,42 @@ _OVER:
mndTransDrop(pTrans);
return code;
}
int8_t mndCheckIndexNameByTagName(SMnode *pMnode, SIdxObj *pIdxObj) {
  // the index is built on the first tag and has no explicit index name
int8_t exist = 0;
SDbObj *pDb = NULL;
if (strlen(pIdxObj->db) > 0) {
pDb = mndAcquireDb(pMnode, pIdxObj->db);
if (pDb == NULL) return 0;
}
SSmaAndTagIter *pIter = NULL;
SIdxObj *pIdx = NULL;
SSdb *pSdb = pMnode->pSdb;
while (1) {
pIter = sdbFetch(pSdb, SDB_IDX, pIter, (void **)&pIdx);
if (pIter == NULL) break;
if (NULL != pDb && pIdx->dbUid != pDb->uid) {
sdbRelease(pSdb, pIdx);
continue;
}
if (pIdxObj->stbUid != pIdx->stbUid) {
sdbRelease(pSdb, pIdx);
continue;
}
if (strncmp(pIdxObj->colName, pIdx->colName, TSDB_COL_NAME_LEN) == 0) {
sdbCancelFetch(pSdb, pIter);
sdbRelease(pSdb, pIdx);
exist = 1;
break;
}
sdbRelease(pSdb, pIdx);
}
mndReleaseDb(pMnode, pDb);
return exist;
}
static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *req, SDbObj *pDb, SStbObj *pStb) {
int32_t code = -1;
SIdxObj idxObj = {0};
@ -681,11 +720,20 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *re
idxObj.stbUid = pStb->uid;
idxObj.dbUid = pStb->dbUid;
int32_t tag = mndFindSuperTableTagId(pStb, req->colName);
int8_t hasIdx = 0;
int32_t tag = mndFindSuperTableTagId(pStb, req->colName, &hasIdx);
if (tag < 0) {
terrno = TSDB_CODE_MND_TAG_NOT_EXIST;
return -1;
} else if (tag == 0) {
}
int8_t exist = 0;
if (tag == 0 && hasIdx == 1) {
exist = mndCheckIndexNameByTagName(pMnode, &idxObj);
if (exist) {
terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;
return -1;
}
} else if (hasIdx == 1) {
terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;
return -1;
}
@ -695,11 +743,11 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *re
return -1;
}
SSchema *pTag = pStb->pTags + tag;
if (IS_IDX_ON(pTag)) {
terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;
return -1;
}
// SSchema *pTag = pStb->pTags + tag;
// if (IS_IDX_ON(pTag)) {
// terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;
// return -1;
// }
code = mndAddIndexImpl(pMnode, pReq, pDb, pStb, &idxObj);
return code;
@ -806,8 +854,8 @@ int32_t mndDropIdxsByStb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *p
if (pIdx->stbUid == pStb->uid) {
if (mndSetDropIdxCommitLogs(pMnode, pTrans, pIdx) != 0) {
sdbCancelFetch(pSdb, pIter);
sdbRelease(pSdb, pIdx);
sdbCancelFetch(pSdb, pIdx);
return -1;
}
}


@ -185,7 +185,7 @@ static void mndSetVgroupOffline(SMnode *pMnode, int32_t dnodeId, int64_t curMs)
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
bool roleChanged = false;
bool stateChanged = false;
for (int32_t vg = 0; vg < pVgroup->replica; ++vg) {
SVnodeGid *pGid = &pVgroup->vnodeGid[vg];
if (pGid->dnodeId == dnodeId) {
@ -197,13 +197,14 @@ static void mndSetVgroupOffline(SMnode *pMnode, int32_t dnodeId, int64_t curMs)
pGid->syncState = TAOS_SYNC_STATE_OFFLINE;
pGid->syncRestore = 0;
pGid->syncCanRead = 0;
roleChanged = true;
pGid->startTimeMs = 0;
stateChanged = true;
}
break;
}
}
if (roleChanged) {
if (stateChanged) {
SDbObj *pDb = mndAcquireDb(pMnode, pVgroup->dbName);
if (pDb != NULL && pDb->stateTs != curMs) {
mInfo("db:%s, stateTs changed by offline check, old newTs:%" PRId64 " newTs:%" PRId64, pDb->name, pDb->stateTs,


@ -807,7 +807,6 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
ESdbStatus objStatus = 0;
char *pWrite;
int64_t curMs = taosGetTimestampMs();
int64_t dummyTimeMs = 0;
pSelfObj = sdbAcquire(pSdb, SDB_MNODE, &pMnode->selfDnodeId);
if (pSelfObj == NULL) {
@ -858,16 +857,9 @@ static int32_t mndRetrieveMnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pObj->createdTime, false);
int64_t roleTimeMs = (isDnodeOnline) ? pObj->roleTimeMs : 0;
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
if (pObj->syncTerm != pSelfObj->syncTerm || !isDnodeOnline) {
// state of old term / no status report => use dummyTimeMs
if (pObj->syncTerm > pSelfObj->syncTerm) {
mError("mnode:%d has a newer term:%" PRId64 " than me:%" PRId64, pObj->id, pObj->syncTerm, pSelfObj->syncTerm);
}
colDataSetVal(pColInfo, numOfRows, (const char *)&dummyTimeMs, false);
} else {
colDataSetVal(pColInfo, numOfRows, (const char *)&pObj->roleTimeMs, false);
}
colDataSetVal(pColInfo, numOfRows, (const char *)&roleTimeMs, false);
numOfRows++;
sdbRelease(pSdb, pObj);


@ -58,8 +58,10 @@ static int32_t convertToRetrieveType(char *name, int32_t len) {
type = TSDB_MGMT_TABLE_DNODE;
} else if (strncasecmp(name, TSDB_INS_TABLE_MNODES, len) == 0) {
type = TSDB_MGMT_TABLE_MNODE;
/*
} else if (strncasecmp(name, TSDB_INS_TABLE_MODULES, len) == 0) {
type = TSDB_MGMT_TABLE_MODULE;
*/
} else if (strncasecmp(name, TSDB_INS_TABLE_QNODES, len) == 0) {
type = TSDB_MGMT_TABLE_QNODE;
} else if (strncasecmp(name, TSDB_INS_TABLE_SNODES, len) == 0) {


@ -631,7 +631,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
mndTransSetSerial(pTrans);
mInfo("trans:%d, used to create sma:%s stream:%s", pTrans->id, pCreate->name, streamObj.name);
if (mndAddPrepareNewVgAction(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
if (mndAddNewVgPrepareAction(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
if (mndSetCreateSmaRedoLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
if (mndSetCreateSmaCommitLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
@ -705,7 +705,10 @@ static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq) {
terrno = TSDB_CODE_INVALID_MSG;
goto _OVER;
}
#ifdef WINDOWS
terrno = TSDB_CODE_MND_INVALID_PLATFORM;
goto _OVER;
#endif
mInfo("sma:%s, start to create", createReq.name);
if (mndCheckCreateSmaReq(&createReq) != 0) {
goto _OVER;


@ -18,6 +18,7 @@
#include "mndDb.h"
#include "mndDnode.h"
#include "mndIndex.h"
#include "mndIndexComm.h"
#include "mndInfoSchema.h"
#include "mndMnode.h"
#include "mndPerfSchema.h"
@ -822,7 +823,7 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat
return -1;
}
if(pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags){
if (pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags) {
terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW;
return -1;
}
@ -834,6 +835,7 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat
pSchema->bytes = pField->bytes;
pSchema->flags = pField->flags;
memcpy(pSchema->name, pField->name, TSDB_COL_NAME_LEN);
memcpy(pSchema->comment, pField->comment, TSDB_COL_COMMENT_LEN);
pSchema->colId = pDst->nextColId;
pDst->nextColId++;
}
@ -847,6 +849,7 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat
SSCHMEA_SET_IDX_ON(pSchema);
}
memcpy(pSchema->name, pField->name, TSDB_COL_NAME_LEN);
memcpy(pSchema->comment, pField->comment, TSDB_COL_COMMENT_LEN);
pSchema->colId = pDst->nextColId;
pDst->nextColId++;
}
@ -857,11 +860,39 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea
SStbObj stbObj = {0};
int32_t code = -1;
char fullIdxName[TSDB_INDEX_FNAME_LEN * 2] = {0};
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq, "create-stb");
if (pTrans == NULL) goto _OVER;
mInfo("trans:%d, used to create stb:%s", pTrans->id, pCreate->name);
if (mndBuildStbFromReq(pMnode, &stbObj, pCreate, pDb) != 0) goto _OVER;
char randStr[24] = {0};
taosRandStr2(randStr, tListLen(randStr) - 1);
SSchema *pSchema = &(stbObj.pTags[0]);
sprintf(fullIdxName, "%s.%s_%s", pDb->name, pSchema->name, randStr);
SSIdx idx = {0};
if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0 && idx.pIdx != NULL) {
terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;
mndReleaseIdx(pMnode, idx.pIdx);
goto _OVER;
}
SIdxObj idxObj = {0};
memcpy(idxObj.name, fullIdxName, TSDB_INDEX_FNAME_LEN);
memcpy(idxObj.stb, stbObj.name, TSDB_TABLE_FNAME_LEN);
memcpy(idxObj.db, stbObj.db, TSDB_DB_FNAME_LEN);
memcpy(idxObj.colName, pSchema->name, TSDB_COL_NAME_LEN);
idxObj.createdTime = taosGetTimestampMs();
idxObj.uid = mndGenerateUid(fullIdxName, strlen(fullIdxName));
idxObj.stbUid = stbObj.uid;
idxObj.dbUid = stbObj.dbUid;
if (mndSetCreateIdxCommitLogs(pMnode, pTrans, &idxObj) < 0) goto _OVER;
if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
code = 0;
@ -956,7 +987,7 @@ static int32_t mndBuildStbFromAlter(SStbObj *pStb, SStbObj *pDst, SMCreateStbReq
return -1;
}
if(pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags){
if (pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags) {
terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW;
return -1;
}
@ -1188,7 +1219,7 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p
return -1;
}
if(pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ntags){
if (pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ntags) {
terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW;
return -1;
}
@ -1478,7 +1509,8 @@ static int32_t mndAlterStbTagBytes(SMnode *pMnode, const SStbObj *pOld, SStbObj
SSchema *pTag = pNew->pTags + tag;
if (!(pTag->type == TSDB_DATA_TYPE_BINARY || pTag->type == TSDB_DATA_TYPE_NCHAR || pTag->type == TSDB_DATA_TYPE_GEOMETRY)) {
if (!(pTag->type == TSDB_DATA_TYPE_BINARY || pTag->type == TSDB_DATA_TYPE_NCHAR ||
pTag->type == TSDB_DATA_TYPE_GEOMETRY)) {
terrno = TSDB_CODE_MND_INVALID_STB_OPTION;
return -1;
}
@ -1506,7 +1538,7 @@ static int32_t mndAddSuperTableColumn(const SStbObj *pOld, SStbObj *pNew, SArray
return -1;
}
if(pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ncols){
if (pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ncols) {
terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW;
return -1;
}
@ -1578,9 +1610,11 @@ static int32_t mndAlterStbColumnBytes(SMnode *pMnode, const SStbObj *pOld, SStbO
return -1;
}
col_id_t colId = pOld->pColumns[col].colId;
uint32_t nLen = 0;
for (int32_t i = 0; i < pOld->numOfColumns; ++i) {
nLen += (pOld->pColumns[i].colId == col) ? pField->bytes : pOld->pColumns[i].bytes;
nLen += (pOld->pColumns[i].colId == colId) ? pField->bytes : pOld->pColumns[i].bytes;
}
if (nLen > TSDB_MAX_BYTES_PER_ROW) {
@ -1588,7 +1622,6 @@ static int32_t mndAlterStbColumnBytes(SMnode *pMnode, const SStbObj *pOld, SStbO
return -1;
}
col_id_t colId = pOld->pColumns[col].colId;
if (mndCheckColAndTagModifiable(pMnode, pOld->name, pOld->uid, colId) != 0) {
return -1;
}
@ -1598,7 +1631,8 @@ static int32_t mndAlterStbColumnBytes(SMnode *pMnode, const SStbObj *pOld, SStbO
}
SSchema *pCol = pNew->pColumns + col;
if (!(pCol->type == TSDB_DATA_TYPE_BINARY || pCol->type == TSDB_DATA_TYPE_NCHAR || pCol->type == TSDB_DATA_TYPE_GEOMETRY)) {
if (!(pCol->type == TSDB_DATA_TYPE_BINARY || pCol->type == TSDB_DATA_TYPE_NCHAR ||
pCol->type == TSDB_DATA_TYPE_GEOMETRY)) {
terrno = TSDB_CODE_MND_INVALID_STB_OPTION;
return -1;
}
@ -3182,7 +3216,6 @@ static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
SSdb *pSdb = pMnode->pSdb;
SStbObj *pStb = NULL;
int32_t numOfRows = 0;
if (!pShow->sysDbRsp) {
numOfRows = buildSysDbColsInfo(pBlock, pShow->db, pShow->filterTb);
@ -3206,7 +3239,7 @@ static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
if (pShow->pIter == NULL) break;
} else {
fetch = true;
void *pKey = taosHashGetKey(pShow->pIter, NULL);
void *pKey = taosHashGetKey(pShow->pIter, NULL);
pStb = sdbAcquire(pSdb, SDB_STB, pKey);
if (!pStb) continue;
}


@ -692,7 +692,10 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
terrno = TSDB_CODE_INVALID_MSG;
goto _OVER;
}
#ifdef WINDOWS
terrno = TSDB_CODE_MND_INVALID_PLATFORM;
goto _OVER;
#endif
mInfo("stream:%s, start to create, sql:%s", createStreamReq.name, createStreamReq.sql);
if (mndCheckCreateStreamReq(&createStreamReq) != 0) {


@ -244,7 +244,7 @@ static void doRemoveLostConsumers(SMqRebOutputObj *pOutput, SHashObj *pHash, con
SMqRebOutputVg outputVg = {.oldConsumerId = consumerId, .newConsumerId = -1, .pVgEp = pVgEp};
taosHashPut(pHash, &pVgEp->vgId, sizeof(int32_t), &outputVg, sizeof(SMqRebOutputVg));
mInfo("sub:%s mq re-balance remove vgId:%d from consumer:%" PRIx64, pSubKey, pVgEp->vgId, consumerId);
mInfo("sub:%s mq re-balance remove vgId:%d from consumer:0x%" PRIx64, pSubKey, pVgEp->vgId, consumerId);
}
taosArrayDestroy(pConsumerEp->vgs);
@ -489,7 +489,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
SMqVgEp *pVgEp = taosArrayGetP(pConsumerEpNew->vgs, i);
if(pVgEp->vgId == d1->vgId){
jump = true;
mInfo("pSub->offsetRows jump, because consumer id:%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId);
mInfo("pSub->offsetRows jump, because consumer id:0x%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId);
break;
}
}


@ -17,7 +17,6 @@
#include "mndSync.h"
#include "mndCluster.h"
#include "mndTrans.h"
#include "mndVgroup.h"
static int32_t mndSyncEqCtrlMsg(const SMsgCb *msgcb, SRpcMsg *pMsg) {
if (pMsg == NULL || pMsg->pCont == NULL) {
@ -75,25 +74,25 @@ static int32_t mndSyncSendMsg(const SEpSet *pEpSet, SRpcMsg *pMsg) {
}
static int32_t mndTransValidatePrepareAction(SMnode *pMnode, STrans *pTrans, STransAction *pAction) {
SSdbRaw *pRaw = pAction->pRaw;
SSdb *pSdb = pMnode->pSdb;
SSdbRow *pRow = NULL;
int32_t code = -1;
void *pObj = NULL;
int code = -1;
if (pAction->msgType == TDMT_MND_CREATE_VG) {
pRow = mndVgroupActionDecode(pAction->pRaw);
if (pRow == NULL) goto _OUT;
if (pRaw->status != SDB_STATUS_CREATING) goto _OUT;
SVgObj *pVgroup = sdbGetRowObj(pRow);
if (pVgroup == NULL) goto _OUT;
pRow = (pSdb->decodeFps[pRaw->type])(pRaw);
if (pRow == NULL) goto _OUT;
pObj = sdbGetRowObj(pRow);
if (pObj == NULL) goto _OUT;
int32_t maxVgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP);
if (maxVgId > pVgroup->vgId) {
mError("trans:%d, failed to satisfy vgroup id %d of prepare action. maxVgId:%d", pTrans->id, pVgroup->vgId,
maxVgId);
goto _OUT;
}
SdbValidateFp validateFp = pSdb->validateFps[pRaw->type];
code = 0;
if (validateFp) {
code = validateFp(pMnode, pTrans, pObj);
}
code = 0;
_OUT:
taosMemoryFreeClear(pRow);
return code;
@ -476,7 +475,7 @@ int32_t mndInitSync(SMnode *pMnode) {
}
tsem_init(&pMgmt->syncSem, 0, 0);
pMgmt->sync = syncOpen(&syncInfo);
pMgmt->sync = syncOpen(&syncInfo, true);
if (pMgmt->sync <= 0) {
mError("failed to open sync since %s", terrstr());
return -1;


@ -655,11 +655,10 @@ int32_t mndTransAppendCommitlog(STrans *pTrans, SSdbRaw *pRaw) {
return mndTransAppendAction(pTrans->commitActions, &action);
}
int32_t mndTransAppendPrepareAction(STrans *pTrans, STransAction *pAction) {
pAction->stage = TRN_STAGE_PREPARE;
pAction->actionType = TRANS_ACTION_RAW;
pAction->mTraceId = pTrans->mTraceId;
return mndTransAppendAction(pTrans->prepareActions, pAction);
int32_t mndTransAppendPrepareLog(STrans *pTrans, SSdbRaw *pRaw) {
STransAction action = {
.pRaw = pRaw, .stage = TRN_STAGE_PREPARE, .actionType = TRANS_ACTION_RAW, .mTraceId = pTrans->mTraceId};
return mndTransAppendAction(pTrans->prepareActions, &action);
}
int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction) {
@ -1125,7 +1124,7 @@ static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransActio
int32_t code = tmsgSendReq(&pAction->epSet, &rpcMsg);
if (code == 0) {
pAction->msgSent = 1;
pAction->msgReceived = 0;
//pAction->msgReceived = 0;
pAction->errCode = TSDB_CODE_ACTION_IN_PROGRESS;
mInfo("trans:%d, %s:%d is sent, %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, detail);


@ -33,6 +33,7 @@
static int32_t mndVgroupActionInsert(SSdb *pSdb, SVgObj *pVgroup);
static int32_t mndVgroupActionDelete(SSdb *pSdb, SVgObj *pVgroup);
static int32_t mndVgroupActionUpdate(SSdb *pSdb, SVgObj *pOld, SVgObj *pNew);
static int32_t mndNewVgActionValidate(SMnode *pMnode, STrans *pTrans, void *pObj);
static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
static void mndCancelGetNextVgroup(SMnode *pMnode, void *pIter);
@ -53,6 +54,7 @@ int32_t mndInitVgroup(SMnode *pMnode) {
.insertFp = (SdbInsertFp)mndVgroupActionInsert,
.updateFp = (SdbUpdateFp)mndVgroupActionUpdate,
.deleteFp = (SdbDeleteFp)mndVgroupActionDelete,
.validateFp = (SdbValidateFp)mndNewVgActionValidate,
};
mndSetMsgHandle(pMnode, TDMT_DND_CREATE_VNODE_RSP, mndTransProcessRsp);
@ -65,6 +67,8 @@ int32_t mndInitVgroup(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_VND_DISABLE_WRITE_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_SYNC_FORCE_FOLLOWER_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_DND_ALTER_VNODE_TYPE_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_SYNC_CONFIG_CHANGE_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_MND_REDISTRIBUTE_VGROUP, mndProcessRedistributeVgroupMsg);
mndSetMsgHandle(pMnode, TDMT_MND_SPLIT_VGROUP, mndProcessSplitVgroupMsg);
@ -103,6 +107,7 @@ SSdbRaw *mndVgroupActionEncode(SVgObj *pVgroup) {
SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
SDB_SET_INT32(pRaw, dataPos, pVgid->dnodeId, _OVER)
}
SDB_SET_INT32(pRaw, dataPos, pVgroup->syncConfChangeVer, _OVER)
SDB_SET_RESERVE(pRaw, dataPos, VGROUP_RESERVE_SIZE, _OVER)
SDB_SET_DATALEN(pRaw, dataPos, _OVER)
@ -127,7 +132,7 @@ SSdbRow *mndVgroupActionDecode(SSdbRaw *pRaw) {
int8_t sver = 0;
if (sdbGetRawSoftVer(pRaw, &sver) != 0) goto _OVER;
if (sver != VGROUP_VER_NUMBER) {
if (sver < 1 || sver > VGROUP_VER_NUMBER) {
terrno = TSDB_CODE_SDB_INVALID_DATA_VER;
goto _OVER;
}
@ -156,6 +161,10 @@ SSdbRow *mndVgroupActionDecode(SSdbRaw *pRaw) {
pVgid->syncState = TAOS_SYNC_STATE_LEADER;
}
}
if(dataPos + sizeof(int32_t) + VGROUP_RESERVE_SIZE <= pRaw->dataLen){
SDB_GET_INT32(pRaw, dataPos, &pVgroup->syncConfChangeVer, _OVER)
}
SDB_GET_RESERVE(pRaw, dataPos, VGROUP_RESERVE_SIZE, _OVER)
terrno = 0;
@ -171,6 +180,17 @@ _OVER:
return pRow;
}
static int32_t mndNewVgActionValidate(SMnode *pMnode, STrans *pTrans, void *pObj) {
SVgObj *pVgroup = pObj;
int32_t maxVgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP);
if (maxVgId > pVgroup->vgId) {
mError("trans:%d, vgroup id %d already in use. maxVgId:%d", pTrans->id, pVgroup->vgId, maxVgId);
return -1;
}
return 0;
}
static int32_t mndVgroupActionInsert(SSdb *pSdb, SVgObj *pVgroup) {
mTrace("vgId:%d, perform insert action, row:%p", pVgroup->vgId, pVgroup);
return 0;
@ -207,6 +227,7 @@ static int32_t mndVgroupActionUpdate(SSdb *pSdb, SVgObj *pOld, SVgObj *pNew) {
pNew->pointsWritten = pOld->pointsWritten;
pNew->compact = pOld->compact;
memcpy(pOld->vnodeGid, pNew->vnodeGid, (TSDB_MAX_REPLICA + TSDB_MAX_LEARNER_REPLICA) * sizeof(SVnodeGid));
pOld->syncConfChangeVer = pNew->syncConfChangeVer;
return 0;
}
@ -266,6 +287,7 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg
createReq.hashPrefix = pDb->cfg.hashPrefix;
createReq.hashSuffix = pDb->cfg.hashSuffix;
createReq.tsdbPageSize = pDb->cfg.tsdbPageSize;
createReq.changeVersion= ++(pVgroup->syncConfChangeVer);
for (int32_t v = 0; v < pVgroup->replica; ++v) {
SReplica *pReplica = NULL;
@ -312,9 +334,12 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg
return NULL;
}
mInfo("vgId:%d, build create vnode req, replica:%d selfIndex:%d learnerReplica:%d learnerSelfIndex:%d strict:%d",
createReq.changeVersion = pVgroup->syncConfChangeVer;
mInfo("vgId:%d, build create vnode req, replica:%d selfIndex:%d learnerReplica:%d learnerSelfIndex:%d strict:%d "
"changeVersion:%d",
createReq.vgId, createReq.replica, createReq.selfIndex, createReq.learnerReplica,
createReq.learnerReplica, createReq.strict);
createReq.learnerReplica, createReq.strict, createReq.changeVersion);
for (int32_t i = 0; i < createReq.replica; ++i) {
mInfo("vgId:%d, replica:%d ep:%s:%u", createReq.vgId, i, createReq.replicas[i].fqdn, createReq.replicas[i].port);
}
@ -392,6 +417,7 @@ static void *mndBuildAlterVnodeReplicaReq(SMnode *pMnode, SDbObj *pDb, SVgObj *p
.learnerReplica = 0,
.selfIndex = -1,
.learnerSelfIndex = -1,
.changeVersion = ++(pVgroup->syncConfChangeVer),
};
for (int32_t v = 0; v < pVgroup->replica; ++v) {
@ -427,9 +453,10 @@ static void *mndBuildAlterVnodeReplicaReq(SMnode *pMnode, SDbObj *pDb, SVgObj *p
}
}
mInfo("vgId:%d, build alter vnode req, replica:%d selfIndex:%d learnerReplica:%d learnerSelfIndex:%d strict:%d",
mInfo("vgId:%d, build alter vnode req, replica:%d selfIndex:%d learnerReplica:%d learnerSelfIndex:%d strict:%d "
"changeVersion:%d",
alterReq.vgId, alterReq.replica, alterReq.selfIndex, alterReq.learnerReplica,
alterReq.learnerSelfIndex, alterReq.strict);
alterReq.learnerSelfIndex, alterReq.strict, alterReq.changeVersion);
for (int32_t i = 0; i < alterReq.replica; ++i) {
mInfo("vgId:%d, replica:%d ep:%s:%u", alterReq.vgId, i, alterReq.replicas[i].fqdn, alterReq.replicas[i].port);
}
@ -460,6 +487,83 @@ static void *mndBuildAlterVnodeReplicaReq(SMnode *pMnode, SDbObj *pDb, SVgObj *p
return pReq;
}
static void *mndBuildCheckLearnCatchupReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, int32_t dnodeId,
int32_t *pContLen) {
SCheckLearnCatchupReq req = {
.vgId = pVgroup->vgId,
.strict = pDb->cfg.strict,
.replica = 0,
.learnerReplica = 0,
.selfIndex = -1,
.learnerSelfIndex = -1,
};
for (int32_t v = 0; v < pVgroup->replica; ++v) {
SReplica *pReplica = NULL;
if(pVgroup->vnodeGid[v].nodeRole == TAOS_SYNC_ROLE_VOTER){
pReplica = &req.replicas[req.replica];
req.replica++;
}
else{
pReplica = &req.learnerReplicas[req.learnerReplica];
req.learnerReplica++;
}
SVnodeGid *pVgid = &pVgroup->vnodeGid[v];
SDnodeObj *pVgidDnode = mndAcquireDnode(pMnode, pVgid->dnodeId);
if (pVgidDnode == NULL) return NULL;
pReplica->id = pVgidDnode->id;
pReplica->port = pVgidDnode->port;
memcpy(pReplica->fqdn, pVgidDnode->fqdn, TSDB_FQDN_LEN);
mndReleaseDnode(pMnode, pVgidDnode);
if(pVgroup->vnodeGid[v].nodeRole == TAOS_SYNC_ROLE_VOTER){
if (dnodeId == pVgid->dnodeId) {
req.selfIndex = v;
}
}
else{
if (dnodeId == pVgid->dnodeId) {
req.learnerSelfIndex = v;
}
}
}
mInfo("vgId:%d, build alter vnode req, replica:%d selfIndex:%d learnerReplica:%d learnerSelfIndex:%d strict:%d",
req.vgId, req.replica, req.selfIndex, req.learnerReplica,
req.learnerSelfIndex, req.strict);
for (int32_t i = 0; i < req.replica; ++i) {
mInfo("vgId:%d, replica:%d ep:%s:%u", req.vgId, i, req.replicas[i].fqdn, req.replicas[i].port);
}
for (int32_t i = 0; i < req.learnerReplica; ++i) {
mInfo("vgId:%d, learnerReplica:%d ep:%s:%u", req.vgId, i,
req.learnerReplicas[i].fqdn, req.learnerReplicas[i].port);
}
if (req.selfIndex == -1 && req.learnerSelfIndex == -1) {
terrno = TSDB_CODE_APP_ERROR;
return NULL;
}
int32_t contLen = tSerializeSAlterVnodeReplicaReq(NULL, 0, &req);
if (contLen < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
void *pReq = taosMemoryMalloc(contLen);
if (pReq == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
tSerializeSAlterVnodeReplicaReq(pReq, contLen, &req);
*pContLen = contLen;
return pReq;
}
static void *mndBuildDisableVnodeWriteReq(SMnode *pMnode, SDbObj *pDb, int32_t vgId, int32_t *pContLen) {
SDisableVnodeWriteReq disableReq = {
.vgId = vgId,
@ -490,6 +594,7 @@ static void *mndBuildAlterVnodeHashRangeReq(SMnode *pMnode, int32_t srcVgId, SVg
.dstVgId = pVgroup->vgId,
.hashBegin = pVgroup->hashBegin,
.hashEnd = pVgroup->hashEnd,
.changeVersion = ++(pVgroup->syncConfChangeVer),
};
mInfo("vgId:%d, build alter vnode hashrange req, dstVgId:%d, hashrange:[%u, %u]", srcVgId, pVgroup->vgId,
@ -848,6 +953,28 @@ static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
}
}
snprintf(role, sizeof(role), "%s%s", syncStr(pVgroup->vnodeGid[i].syncState), star);
/*
mInfo("db:%s, learner progress:%d", pDb->name, pVgroup->vnodeGid[i].learnerProgress);
if (pVgroup->vnodeGid[i].syncState == TAOS_SYNC_STATE_LEARNER) {
if(pVgroup->vnodeGid[i].learnerProgress < 0){
snprintf(role, sizeof(role), "%s-",
syncStr(pVgroup->vnodeGid[i].syncState));
}
else if(pVgroup->vnodeGid[i].learnerProgress >= 100){
snprintf(role, sizeof(role), "%s--",
syncStr(pVgroup->vnodeGid[i].syncState));
}
else{
snprintf(role, sizeof(role), "%s%d",
syncStr(pVgroup->vnodeGid[i].syncState), pVgroup->vnodeGid[i].learnerProgress);
}
}
else{
snprintf(role, sizeof(role), "%s%s", syncStr(pVgroup->vnodeGid[i].syncState), star);
}
*/
} else {
}
STR_WITH_MAXSIZE_TO_VARSTR(buf1, role, pShow->pMeta->pSchemas[cols].bytes);
@ -961,27 +1088,24 @@ static int32_t mndRetrieveVnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
int32_t numOfRows = 0;
SVgObj *pVgroup = NULL;
int32_t cols = 0;
int64_t curMs = taosGetTimestampMs();
while (numOfRows < rows) {
pShow->pIter = sdbFetch(pSdb, SDB_VGROUP, pShow->pIter, (void **)&pVgroup);
if (pShow->pIter == NULL) break;
for (int32_t i = 0; i < pVgroup->replica && numOfRows < rows; ++i) {
SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
SVnodeGid *pGid = &pVgroup->vnodeGid[i];
SColumnInfoData *pColInfo = NULL;
cols = 0;
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pGid->dnodeId, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pVgroup->vgId, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pVgroup->replica, false);
char buf[20] = {0};
STR_TO_VARSTR(buf, syncStr(pVgid->syncState));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)buf, false);
// db_name
const char *dbname = mndGetDbStr(pVgroup->dbName);
char b1[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
if (dbname != NULL) {
@ -992,20 +1116,33 @@ static int32_t mndRetrieveVnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)b1, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pVgid->dnodeId, false);
SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgid->dnodeId);
char b2[TSDB_EP_LEN + VARSTR_HEADER_SIZE] = {0};
if (pDnode != NULL) {
STR_WITH_MAXSIZE_TO_VARSTR(b2, pDnode->ep, TSDB_EP_LEN + VARSTR_HEADER_SIZE);
} else {
STR_WITH_MAXSIZE_TO_VARSTR(b2, "NULL", TSDB_EP_LEN + VARSTR_HEADER_SIZE);
// dnode is online?
SDnodeObj *pDnode = mndAcquireDnode(pMnode, pGid->dnodeId);
if (pDnode == NULL) {
mError("failed to acquire dnode. dnodeId:%d", pGid->dnodeId);
break;
}
bool isDnodeOnline = mndIsDnodeOnline(pDnode, curMs);
char buf[20] = {0};
ESyncState syncState = (isDnodeOnline) ? pGid->syncState : TAOS_SYNC_STATE_OFFLINE;
STR_TO_VARSTR(buf, syncStr(syncState));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)b2, false);
colDataSetVal(pColInfo, numOfRows, (const char *)buf, false);
int64_t roleTimeMs = (isDnodeOnline) ? pGid->roleTimeMs : 0;
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&roleTimeMs, false);
int64_t startTimeMs = (isDnodeOnline) ? pGid->startTimeMs : 0;
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&startTimeMs, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pGid->syncRestore, false);
numOfRows++;
sdbRelease(pSdb, pDnode);
}
sdbRelease(pSdb, pVgroup);
@ -1132,6 +1269,51 @@ _OVER:
return 0;
}
static int32_t mndRemoveVnodeFromVgroupWithoutSave(SMnode *pMnode, STrans *pTrans, SVgObj *pVgroup, SArray *pArray,
SVnodeGid *pDelVgid) {
taosArraySort(pArray, (__compar_fn_t)mndCompareDnodeVnodes);
for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
SDnodeObj *pDnode = taosArrayGet(pArray, i);
mInfo("dnode:%d, equivalent vnodes:%d others:%d", pDnode->id, pDnode->numOfVnodes, pDnode->numOfOtherNodes);
}
int32_t code = -1;
for (int32_t d = taosArrayGetSize(pArray) - 1; d >= 0; --d) {
SDnodeObj *pDnode = taosArrayGet(pArray, d);
for (int32_t vn = 0; vn < pVgroup->replica; ++vn) {
SVnodeGid *pVgid = &pVgroup->vnodeGid[vn];
if (pVgid->dnodeId == pDnode->id) {
int64_t vgMem = mndGetVgroupMemory(pMnode, NULL, pVgroup);
pDnode->memUsed -= vgMem;
mInfo("db:%s, vgId:%d, vn:%d is removed, memory:%" PRId64 ", dnode:%d avail:%" PRId64 " used:%" PRId64,
pVgroup->dbName, pVgroup->vgId, vn, vgMem, pVgid->dnodeId, pDnode->memAvail, pDnode->memUsed);
pDnode->numOfVnodes--;
pVgroup->replica--;
*pDelVgid = *pVgid;
*pVgid = pVgroup->vnodeGid[pVgroup->replica];
memset(&pVgroup->vnodeGid[pVgroup->replica], 0, sizeof(SVnodeGid));
code = 0;
goto _OVER;
}
}
}
_OVER:
if (code != 0) {
terrno = TSDB_CODE_APP_ERROR;
mError("db:%s, failed to remove vnode from vgId:%d since %s", pVgroup->dbName, pVgroup->vgId, terrstr());
return -1;
}
for (int32_t vn = 0; vn < pVgroup->replica; ++vn) {
SVnodeGid *pVgid = &pVgroup->vnodeGid[vn];
mInfo("db:%s, vgId:%d, vn:%d dnode:%d is reserved", pVgroup->dbName, pVgroup->vgId, vn, pVgid->dnodeId);
}
return 0;
}
int32_t mndAddCreateVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SVnodeGid *pVgid) {
STransAction action = {0};
@ -1208,6 +1390,42 @@ int32_t mndAddAlterVnodeConfirmAction(SMnode *pMnode, STrans *pTrans, SDbObj *pD
return 0;
}
int32_t mndAddChangeConfigAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb,
SVgObj *pOldVgroup, SVgObj *pNewVgroup, int32_t dnodeId) {
STransAction action = {0};
action.epSet = mndGetVgroupEpset(pMnode, pNewVgroup);
int32_t contLen = 0;
void *pReq = mndBuildAlterVnodeReplicaReq(pMnode, pDb, pNewVgroup, dnodeId, &contLen);
if (pReq == NULL) return -1;
int32_t totallen = contLen + sizeof(SMsgHead);
SMsgHead *pHead = taosMemoryMalloc(totallen);
if (pHead == NULL) {
taosMemoryFree(pReq);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
pHead->contLen = htonl(totallen);
pHead->vgId = htonl(pNewVgroup->vgId);
memcpy((void*)(pHead + 1), pReq, contLen);
taosMemoryFree(pReq);
action.pCont = pHead;
action.contLen = totallen;
action.msgType = TDMT_SYNC_CONFIG_CHANGE;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pHead);
return -1;
}
return 0;
}
static int32_t mndAddAlterVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans, int32_t srcVgId, SVgObj *pVgroup) {
STransAction action = {0};
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
@ -1249,12 +1467,11 @@ int32_t mndAddAlterVnodeConfigAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb
return 0;
}
int32_t mndAddPrepareNewVgAction(SMnode *pMnode, STrans *pTrans, SVgObj *pVg) {
int32_t mndAddNewVgPrepareAction(SMnode *pMnode, STrans *pTrans, SVgObj *pVg) {
SSdbRaw *pRaw = mndVgroupActionEncode(pVg);
if (pRaw == NULL) goto _err;
STransAction action = {.pRaw = pRaw, .msgType = TDMT_MND_CREATE_VG};
if (mndTransAppendPrepareAction(pTrans, &action) != 0) goto _err;
if (mndTransAppendPrepareLog(pTrans, pRaw) != 0) goto _err;
(void)sdbSetRawStatus(pRaw, SDB_STATUS_CREATING);
pRaw = NULL;
return 0;
@ -1288,6 +1505,32 @@ int32_t mndAddAlterVnodeReplicaAction(SMnode *pMnode, STrans *pTrans, SDbObj *pD
return 0;
}
int32_t mndAddCheckLearnerCatchupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int32_t dnodeId) {
SDnodeObj *pDnode = mndAcquireDnode(pMnode, dnodeId);
if (pDnode == NULL) return -1;
STransAction action = {0};
action.epSet = mndGetDnodeEpset(pDnode);
mndReleaseDnode(pMnode, pDnode);
int32_t contLen = 0;
void *pReq = mndBuildCheckLearnCatchupReq(pMnode, pDb, pVgroup, dnodeId, &contLen);
if (pReq == NULL) return -1;
action.pCont = pReq;
action.contLen = contLen;
action.msgType = TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP;
action.acceptableCode = TSDB_CODE_VND_ALREADY_IS_VOTER;
action.retryCode = TSDB_CODE_VND_NOT_CATCH_UP;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);
return -1;
}
return 0;
}
int32_t mndAddAlterVnodeTypeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, int32_t dnodeId) {
SDnodeObj *pDnode = mndAcquireDnode(pMnode, dnodeId);
if (pDnode == NULL) return -1;
@ -2205,6 +2448,133 @@ int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb
return 0;
}
int32_t mndBuildRaftAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb, SDbObj *pNewDb, SVgObj *pVgroup,
SArray *pArray) {
SVgObj newVgroup = {0};
memcpy(&newVgroup, pVgroup, sizeof(SVgObj));
if (pVgroup->replica <= 0 || pVgroup->replica == pNewDb->cfg.replications) {
if (mndAddAlterVnodeConfigAction(pMnode, pTrans, pNewDb, pVgroup) != 0) return -1;
if (mndCheckDnodeMemory(pMnode, pOldDb, pNewDb, &newVgroup, pVgroup, pArray) != 0) return -1;
return 0;
}
mndTransSetSerial(pTrans);
mInfo("trans:%d, vgId:%d, alter vgroup, syncConfChangeVer:%d, version:%d, replica:%d",
pTrans->id, pVgroup->vgId, pVgroup->syncConfChangeVer, pVgroup->version, pVgroup->replica);
if (newVgroup.replica == 1 && pNewDb->cfg.replications == 3) {
mInfo("db:%s, vgId:%d, will add 2 vnodes, vn:0 dnode:%d", pVgroup->dbName, pVgroup->vgId,
pVgroup->vnodeGid[0].dnodeId);
//add second
if (mndAddVnodeToVgroup(pMnode, pTrans, &newVgroup, pArray) != 0) return -1;
//add third
if (mndAddVnodeToVgroup(pMnode, pTrans, &newVgroup, pArray) != 0) return -1;
//add learner stage
newVgroup.vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER;
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_LEARNER;
newVgroup.vnodeGid[2].nodeRole = TAOS_SYNC_ROLE_LEARNER;
if (mndAddChangeConfigAction(pMnode, pTrans, pNewDb, pVgroup, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0) return -1;
mInfo("trans:%d, vgId:%d, add change config, syncConfChangeVer:%d, version:%d, replica:%d",
pTrans->id, pVgroup->vgId, newVgroup.syncConfChangeVer, pVgroup->version, pVgroup->replica);
if (mndAddCreateVnodeAction(pMnode, pTrans, pNewDb, &newVgroup, &newVgroup.vnodeGid[1]) != 0) return -1;
mInfo("trans:%d, vgId:%d, create vnode, syncConfChangeVer:%d, version:%d, replica:%d",
pTrans->id, pVgroup->vgId, newVgroup.syncConfChangeVer, pVgroup->version, pVgroup->replica);
if (mndAddCreateVnodeAction(pMnode, pTrans, pNewDb, &newVgroup, &newVgroup.vnodeGid[2]) != 0) return -1;
mInfo("trans:%d, vgId:%d, create vnode, syncConfChangeVer:%d, version:%d, replica:%d",
pTrans->id, pVgroup->vgId, newVgroup.syncConfChangeVer, pVgroup->version, pVgroup->replica);
//check learner
newVgroup.vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER;
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_VOTER;
newVgroup.vnodeGid[2].nodeRole = TAOS_SYNC_ROLE_VOTER;
if (mndAddCheckLearnerCatchupAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[1].dnodeId) != 0) return -1;
if (mndAddCheckLearnerCatchupAction(pMnode, pTrans, pNewDb, &newVgroup, newVgroup.vnodeGid[2].dnodeId) != 0) return -1;
//change raft type
newVgroup.vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER;
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_VOTER;
newVgroup.vnodeGid[2].nodeRole = TAOS_SYNC_ROLE_LEARNER;
if (mndAddChangeConfigAction(pMnode, pTrans, pNewDb, pVgroup, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0)
return -1;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pNewDb, &newVgroup) != 0) return -1;
newVgroup.vnodeGid[0].nodeRole = TAOS_SYNC_ROLE_VOTER;
newVgroup.vnodeGid[1].nodeRole = TAOS_SYNC_ROLE_VOTER;
newVgroup.vnodeGid[2].nodeRole = TAOS_SYNC_ROLE_VOTER;
if (mndAddChangeConfigAction(pMnode, pTrans, pNewDb, pVgroup, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0)
return -1;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pNewDb, &newVgroup) != 0) return -1;
SSdbRaw *pVgRaw = mndVgroupActionEncode(&newVgroup);
if (pVgRaw == NULL) return -1;
if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) {
sdbFreeRaw(pVgRaw);
return -1;
}
(void)sdbSetRawStatus(pVgRaw, SDB_STATUS_READY);
} else if (newVgroup.replica == 3 && pNewDb->cfg.replications == 1) {
mInfo("db:%s, vgId:%d, will remove 2 vnodes, vn:0 dnode:%d vn:1 dnode:%d vn:2 dnode:%d", pVgroup->dbName,
pVgroup->vgId, pVgroup->vnodeGid[0].dnodeId, pVgroup->vnodeGid[1].dnodeId, pVgroup->vnodeGid[2].dnodeId);
SVnodeGid del1 = {0};
if (mndRemoveVnodeFromVgroupWithoutSave(pMnode, pTrans, &newVgroup, pArray, &del1) != 0) return -1;
if (mndAddChangeConfigAction(pMnode, pTrans, pNewDb, pVgroup, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0) return -1;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pNewDb, &newVgroup) != 0) return -1;
if (mndAddDropVnodeAction(pMnode, pTrans, pNewDb, &newVgroup, &del1, true) != 0) return -1;
SSdbRaw *pVgRaw = mndVgroupActionEncode(&newVgroup);
if (pVgRaw == NULL) return -1;
if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) {
sdbFreeRaw(pVgRaw);
return -1;
}
(void)sdbSetRawStatus(pVgRaw, SDB_STATUS_READY);
SVnodeGid del2 = {0};
if (mndRemoveVnodeFromVgroupWithoutSave(pMnode, pTrans, &newVgroup, pArray, &del2) != 0) return -1;
if (mndAddChangeConfigAction(pMnode, pTrans, pNewDb, pVgroup, &newVgroup, newVgroup.vnodeGid[0].dnodeId) != 0) return -1;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pNewDb, &newVgroup) != 0) return -1;
if (mndAddDropVnodeAction(pMnode, pTrans, pNewDb, &newVgroup, &del2, true) != 0) return -1;
pVgRaw = mndVgroupActionEncode(&newVgroup);
if (pVgRaw == NULL) return -1;
if (mndTransAppendRedolog(pTrans, pVgRaw) != 0) {
sdbFreeRaw(pVgRaw);
return -1;
}
(void)sdbSetRawStatus(pVgRaw, SDB_STATUS_READY);
} else {
return -1;
}
mndSortVnodeGid(&newVgroup);
{
SSdbRaw *pVgRaw = mndVgroupActionEncode(&newVgroup);
if (pVgRaw == NULL) return -1;
if (mndTransAppendCommitlog(pTrans, pVgRaw) != 0) {
sdbFreeRaw(pVgRaw);
return -1;
}
(void)sdbSetRawStatus(pVgRaw, SDB_STATUS_READY);
}
return 0;
}
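
For readability, a condensed outline of the 1 -> 3 replica expansion implemented above (step summary only; message types as used in the code):

/* 1. add two target dnodes to the vgroup metadata (mndAddVnodeToVgroup x2)
 * 2. TDMT_SYNC_CONFIG_CHANGE on the existing voter, with the new members still learners
 * 3. create the two new vnodes as learners (mndAddCreateVnodeAction x2)
 * 4. TDMT_DND_CHECK_VNODE_LEARNER_CATCHUP on both new vnodes
 * 5. promote the learners to voters through two more config-change + confirm rounds
 * 6. persist the updated vgroup via redo and commit logs
 */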
int32_t mndBuildRestoreAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *db, SVgObj *pVgroup,
SDnodeObj *pDnode) {
SVgObj newVgroup = {0};
@ -2370,13 +2740,13 @@ int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgro
int32_t maxVgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP);
int32_t srcVgId = newVg1.vgId;
newVg1.vgId = maxVgId;
if (mndAddPrepareNewVgAction(pMnode, pTrans, &newVg1) != 0) goto _OVER;
if (mndAddNewVgPrepareAction(pMnode, pTrans, &newVg1) != 0) goto _OVER;
if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, srcVgId, &newVg1) != 0) goto _OVER;
maxVgId++;
srcVgId = newVg2.vgId;
newVg2.vgId = maxVgId;
if (mndAddPrepareNewVgAction(pMnode, pTrans, &newVg2) != 0) goto _OVER;
if (mndAddNewVgPrepareAction(pMnode, pTrans, &newVg2) != 0) goto _OVER;
if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, srcVgId, &newVg2) != 0) goto _OVER;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg1) != 0) goto _OVER;


@ -106,6 +106,7 @@ typedef int32_t (*SdbInsertFp)(SSdb *pSdb, void *pObj);
typedef int32_t (*SdbUpdateFp)(SSdb *pSdb, void *pSrcObj, void *pDstObj);
typedef int32_t (*SdbDeleteFp)(SSdb *pSdb, void *pObj, bool callFunc);
typedef int32_t (*SdbDeployFp)(SMnode *pMnode);
typedef int32_t (*SdbValidateFp)(SMnode *pMnode, void *pTrans, void *pObj);
typedef SSdbRow *(*SdbDecodeFp)(SSdbRaw *pRaw);
typedef SSdbRaw *(*SdbEncodeFp)(void *pObj);
typedef bool (*sdbTraverseFp)(SMnode *pMnode, void *pObj, void *p1, void *p2, void *p3);
@ -189,6 +190,7 @@ typedef struct SSdb {
SdbDeployFp deployFps[SDB_MAX];
SdbEncodeFp encodeFps[SDB_MAX];
SdbDecodeFp decodeFps[SDB_MAX];
SdbValidateFp validateFps[SDB_MAX];
TdThreadMutex filelock;
} SSdb;
@ -207,6 +209,7 @@ typedef struct {
SdbInsertFp insertFp;
SdbUpdateFp updateFp;
SdbDeleteFp deleteFp;
SdbValidateFp validateFp;
} SSdbTable;
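
A minimal sketch of how a module wires the new hook in, mirroring the mndDb.c and mndVgroup.c hunks earlier in this diff (only the fields visible in those hunks are shown; the remaining SSdbTable fields stay as before):

SSdbTable table = {
    .insertFp = (SdbInsertFp)mndVgroupActionInsert,
    .updateFp = (SdbUpdateFp)mndVgroupActionUpdate,
    .deleteFp = (SdbDeleteFp)mndVgroupActionDelete,
    .validateFp = (SdbValidateFp)mndNewVgActionValidate,  // new: validates the decoded object of a prepare action
};
return sdbSetTable(pMnode->pSdb, table);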
typedef struct SSdbOpt {

Some files were not shown because too many files have changed in this diff Show More