Merge branch 3.0 to FEAT/TD-25699-3.0
commit 7735e6f388
@ -0,0 +1,19 @@
|
|||
# apr-util
|
||||
ExternalProject_Add(aprutil-1
|
||||
URL https://dlcdn.apache.org//apr/apr-util-1.6.3.tar.gz
|
||||
URL_HASH SHA256=2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983
|
||||
DOWNLOAD_NO_PROGRESS 1
|
||||
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
|
||||
#GIT_REPOSITORY https://github.com/apache/apr-util.git
|
||||
#GIT_TAG 1.5.4
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util"
|
||||
#BINARY_DIR ""
|
||||
BUILD_IN_SOURCE TRUE
|
||||
BUILD_ALWAYS 1
|
||||
#UPDATE_COMMAND ""
|
||||
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1
|
||||
#CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr
|
||||
BUILD_COMMAND make
|
||||
INSTALL_COMMAND make install
|
||||
TEST_COMMAND ""
|
||||
)
|
|
@ -0,0 +1,19 @@
|
|||
# apr
|
||||
ExternalProject_Add(apr-1
|
||||
URL https://dlcdn.apache.org//apr/apr-1.7.4.tar.gz
|
||||
URL_HASH SHA256=a4137dd82a185076fa50ba54232d920a17c6469c30b0876569e1c2a05ff311d9
|
||||
DOWNLOAD_NO_PROGRESS 1
|
||||
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
|
||||
#GIT_REPOSITORY https://github.com/apache/apr.git
|
||||
#GIT_TAG 1.5.2
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/apr"
|
||||
BUILD_IN_SOURCE TRUE
|
||||
UPDATE_DISCONNECTED TRUE
|
||||
BUILD_ALWAYS 1
|
||||
#UPDATE_COMMAND ""
|
||||
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --enable-shared=no
|
||||
#CONFIGURE_COMMAND ./configure
|
||||
BUILD_COMMAND make
|
||||
INSTALL_COMMAND make install
|
||||
TEST_COMMAND ""
|
||||
)
|
|
@ -1,5 +1,4 @@
|
|||
cmake_minimum_required(VERSION 3.0)
|
||||
|
||||
set(CMAKE_VERBOSE_MAKEFILE ON)
|
||||
set(TD_BUILD_TAOSA_INTERNAL FALSE)
|
||||
|
||||
|
@ -78,6 +77,12 @@ ELSE ()
|
|||
SET(TD_TAOS_TOOLS TRUE)
|
||||
ENDIF ()
|
||||
|
||||
IF (${TD_WINDOWS})
|
||||
SET(TAOS_LIB taos_static)
|
||||
ELSE ()
|
||||
SET(TAOS_LIB taos)
|
||||
ENDIF ()
|
||||
|
||||
IF (TD_WINDOWS)
|
||||
MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}")
|
||||
SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd")
|
||||
|
|
|
@ -125,6 +125,16 @@ option(
|
|||
ON
|
||||
)
|
||||
|
||||
IF(${TD_LINUX})
|
||||
|
||||
option(
|
||||
BUILD_WITH_COS
|
||||
"If build with cos"
|
||||
ON
|
||||
)
|
||||
|
||||
ENDIF ()
|
||||
|
||||
option(
|
||||
BUILD_WITH_SQLITE
|
||||
"If build with sqlite"
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
# cos
|
||||
ExternalProject_Add(cos
|
||||
GIT_REPOSITORY https://github.com/tencentyun/cos-c-sdk-v5.git
|
||||
GIT_TAG v5.0.16
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/cos-c-sdk-v5"
|
||||
BINARY_DIR ""
|
||||
#BUILD_IN_SOURCE TRUE
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
)
|
|
@ -0,0 +1,17 @@
|
|||
# curl
|
||||
ExternalProject_Add(curl
|
||||
URL https://curl.se/download/curl-8.2.1.tar.gz
|
||||
DOWNLOAD_NO_PROGRESS 1
|
||||
DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download"
|
||||
#GIT_REPOSITORY https://github.com/curl/curl.git
|
||||
#GIT_TAG curl-7_88_1
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/curl"
|
||||
BUILD_IN_SOURCE TRUE
|
||||
BUILD_ALWAYS 1
|
||||
#UPDATE_COMMAND ""
|
||||
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli
|
||||
#CONFIGURE_COMMAND ./configure --without-ssl
|
||||
BUILD_COMMAND make
|
||||
INSTALL_COMMAND make install
|
||||
TEST_COMMAND ""
|
||||
)
|
|
@ -0,0 +1,14 @@
|
|||
# mxml
|
||||
ExternalProject_Add(mxml
|
||||
GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git
|
||||
GIT_TAG release-2.10
|
||||
SOURCE_DIR "${TD_CONTRIB_DIR}/mxml"
|
||||
#BINARY_DIR ""
|
||||
BUILD_IN_SOURCE TRUE
|
||||
#UPDATE_COMMAND ""
|
||||
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --enable-shared=no
|
||||
#CONFIGURE_COMMAND ./configure
|
||||
BUILD_COMMAND make
|
||||
INSTALL_COMMAND make install
|
||||
TEST_COMMAND ""
|
||||
)
|
|
@ -6,6 +6,39 @@ function(cat IN_FILE OUT_FILE)
|
|||
file(APPEND ${OUT_FILE} "${CONTENTS}")
|
||||
endfunction(cat IN_FILE OUT_FILE)
|
||||
|
||||
if(${TD_LINUX})
|
||||
|
||||
set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3")
|
||||
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||
|
||||
if(${BUILD_WITH_COS})
|
||||
file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/)
|
||||
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||
cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||
cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3})
|
||||
endif(${BUILD_WITH_COS})
|
||||
|
||||
configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
|
||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||
|
||||
set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2")
|
||||
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
|
||||
|
||||
if(${BUILD_WITH_COS})
|
||||
cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2})
|
||||
endif(${BUILD_WITH_COS})
|
||||
|
||||
configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
|
||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
|
||||
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
|
||||
|
||||
endif(${TD_LINUX})
|
||||
|
||||
set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in")
|
||||
configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
|
||||
|
@ -122,6 +155,16 @@ if(${BUILD_WITH_SQLITE})
|
|||
cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
endif(${BUILD_WITH_SQLITE})
|
||||
|
||||
# cos
|
||||
if(${BUILD_WITH_COS})
|
||||
#cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
#cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
#cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
#cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
add_definitions(-DUSE_COS)
|
||||
endif(${BUILD_WITH_COS})
|
||||
|
||||
# lucene
|
||||
if(${BUILD_WITH_LUCENE})
|
||||
cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE})
|
||||
|
@ -347,6 +390,31 @@ if (${BUILD_WITH_ROCKSDB})
|
|||
endif()
|
||||
endif()
|
||||
|
||||
# cos
|
||||
if(${BUILD_WITH_COS})
|
||||
if(${TD_LINUX})
|
||||
set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1)
|
||||
#ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a)
|
||||
option(ENABLE_TEST "Enable the tests" OFF)
|
||||
INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include)
|
||||
MESSAGE("$ENV{HOME}/.cos-local.1/include")
|
||||
|
||||
set(CMAKE_BUILD_TYPE debug)
|
||||
set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME})
|
||||
set(CMAKE_PROJECT_NAME cos_c_sdk)
|
||||
|
||||
add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL)
|
||||
target_include_directories(
|
||||
cos_c_sdk
|
||||
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/cos-c-sdk-v5/cos_c_sdk>
|
||||
)
|
||||
|
||||
set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME})
|
||||
else()
|
||||
|
||||
endif(${TD_LINUX})
|
||||
endif(${BUILD_WITH_COS})
|
||||
|
||||
# lucene
|
||||
# To support build on ubuntu: sudo apt-get install libboost-all-dev
|
||||
if(${BUILD_WITH_LUCENE})
|
||||
|
|
|
@ -3,6 +3,11 @@ if(${BUILD_WITH_ROCKSDB})
|
|||
add_subdirectory(rocksdb)
|
||||
endif(${BUILD_WITH_ROCKSDB})
|
||||
|
||||
# cos
|
||||
if(${BUILD_WITH_COS})
|
||||
add_subdirectory(cos)
|
||||
endif(${BUILD_WITH_COS})
|
||||
|
||||
if(${BUILD_WITH_LUCENE})
|
||||
add_subdirectory(lucene)
|
||||
endif(${BUILD_WITH_LUCENE})
|
||||
|
|
|
@ -0,0 +1,49 @@
|
|||
add_executable(cosTest "")
|
||||
target_sources(cosTest
|
||||
PRIVATE
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/main.c"
|
||||
)
|
||||
|
||||
#find_path(APR_INCLUDE_DIR apr-1/apr_time.h)
|
||||
#find_path(APR_UTIL_INCLUDE_DIR apr/include/apr-1/apr_md5.h)
|
||||
#find_path(MINIXML_INCLUDE_DIR mxml.h)
|
||||
#find_path(CURL_INCLUDE_DIR curl/curl.h)
|
||||
|
||||
#include_directories (${MINIXML_INCLUDE_DIR})
|
||||
#include_directories (${CURL_INCLUDE_DIR})
|
||||
FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
|
||||
#FIND_PROGRAM(APU_CONFIG_BIN NAMES apu-config apu-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
|
||||
|
||||
IF (APR_CONFIG_BIN)
|
||||
EXECUTE_PROCESS(
|
||||
COMMAND ${APR_CONFIG_BIN} --includedir
|
||||
OUTPUT_VARIABLE APR_INCLUDE_DIR
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||
)
|
||||
ENDIF()
|
||||
#IF (APU_CONFIG_BIN)
|
||||
# EXECUTE_PROCESS(
|
||||
# COMMAND ${APU_CONFIG_BIN} --includedir
|
||||
# OUTPUT_VARIABLE APR_UTIL_INCLUDE_DIR
|
||||
# OUTPUT_STRIP_TRAILING_WHITESPACE
|
||||
# )
|
||||
#ENDIF()
|
||||
|
||||
include_directories (${APR_INCLUDE_DIR})
|
||||
#include_directories (${APR_UTIL_INCLUDE_DIR})
|
||||
|
||||
target_include_directories(
|
||||
cosTest
|
||||
PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
|
||||
)
|
||||
|
||||
#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
|
||||
#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
|
||||
#find_library(MINIXML_LIBRARY mxml)
|
||||
#find_library(CURL_LIBRARY curl)
|
||||
|
||||
target_link_libraries(cosTest cos_c_sdk)
|
||||
target_link_libraries(cosTest apr-1)
target_link_libraries(cosTest aprutil-1)
|
||||
target_link_libraries(cosTest mxml)
|
||||
target_link_libraries(cosTest curl)
|
File diff suppressed because it is too large
|
@ -52,7 +52,7 @@ CREATE TABLE d1004 USING meters TAGS ("California.LosAngeles", 3);
|
|||
### Create a Stream
|
||||
|
||||
```sql
|
||||
create stream current_stream into current_stream_output_stb as select _wstart as start, _wend as end, max(current) as max_current from meters where voltage <= 220 interval (5s);
|
||||
create stream current_stream trigger at_once into current_stream_output_stb as select _wstart as wstart, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s);
|
||||
```
|
||||
|
||||
### Write Data
|
||||
|
|
|
@ -23,7 +23,20 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
|
|||
|
||||
To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
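As a minimal illustration of the retention settings mentioned above, the following sketch creates a database with an explicit WAL retention period through the C client. The database name, retention value, and connection parameters are placeholders chosen for this example, not values taken from this document.

```c
#include <stdio.h>
#include "taos.h"   // TDengine C client header

int main(void) {
  // Placeholder connection parameters; adjust to your deployment.
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (taos == NULL) return 1;

  // Keep WAL entries for one hour (3600 s) so that subscribers have time to
  // consume them before they are recycled; see the CREATE DATABASE statement.
  TAOS_RES *res = taos_query(taos,
      "CREATE DATABASE IF NOT EXISTS power WAL_RETENTION_PERIOD 3600");
  if (taos_errno(res) != 0) {
    printf("create database failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(taos);
  return 0;
}
```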
|
||||
|
||||
Tips: Data subscription is to consume data from the wal. If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products.
|
||||
Tips (using the C interface as an example):
1. A consumer group consumes all data under the same topic, and different consumer groups are independent of each other;
2. A consumer group consumes all vgroups of the same topic and can be composed of multiple consumers, but each vgroup is consumed by only one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
3. On the server side, only one offset is saved for each vgroup. The offsets of a vgroup increase monotonically but are not necessarily continuous, and there is no correlation between the offsets of different vgroups;
4. Each poll returns a result block that belongs to one vgroup and may contain data from multiple WAL versions. The offset of the first record in the block can be obtained through tmq_get_vgroup_offset;
5. If a consumer group has never committed an offset, its member consumers start consuming from the value of the parameter auto.offset.reset when they restart and pull data again. Within a consumer's lifecycle, the client locally records the offset of the most recently pulled data and does not pull duplicate data;
6. If a consumer terminates abnormally (without calling tmq_close), a rebalance of its consumer group is triggered after about 12 seconds; the consumer's status on the server changes to LOST, and after about 1 day the consumer is deleted automatically. If the consumer exits normally, it is deleted right after exiting. If a new consumer is added, a rebalance is triggered after about 2 seconds, and the new consumer's status on the server changes to ready;
7. A consumer group rebalance reassigns vgroups to all consumer members in the ready state of the group; a consumer can only perform assignment/seek/commit/poll operations on the vgroups it is responsible for;
8. A consumer can call tmq_position to obtain the offset of the current consumption, seek to a specified offset, and consume again;
9. Seek points the position to the specified offset without performing a commit. Once the seek succeeds, the consumer can poll the specified offset and subsequent data;
10. Before a seek operation, tmq_get_topic_assignment must be called to obtain the consumer's vgroup IDs and offset ranges. The seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
11. Because of the WAL expiration and deletion mechanism, the offset may have expired by the time data is polled even if the seek succeeded. If the polled offset is less than the minimum WAL version number, consumption starts from the minimum WAL version number;
12. The tmq_get_vgroup_offset interface obtains the offset of the first record in the result block that contains the record. When seeking to this offset, all data in this block is consumed. See point 4; a minimal seek-and-consume sketch in C is shown after this list;
13. Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. Therefore, set reasonable values for the parameters `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database, and make sure your application consumes the data in a timely way so that no data is lost. This behavior is similar to Kafka and other widely used message queue products.
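The points above can be combined into a short consumer routine. The following is a minimal, illustrative C sketch, not code from this PR: it assumes a consumer `tmq` that has already subscribed to a topic; the topic name, timeout, and error handling are placeholders.

```c
#include <inttypes.h>
#include <stdio.h>
#include "taos.h"

// Sketch: rewind one topic to the earliest offset kept in the WAL for every
// assigned vgroup, then drain the data, committing as it is consumed.
static void rewind_and_consume(tmq_t *tmq, const char *topic) {
  tmq_topic_assignment *assignment = NULL;
  int32_t numOfAssignment = 0;
  int32_t code = tmq_get_topic_assignment(tmq, topic, &assignment, &numOfAssignment);
  if (code != 0) {
    printf("get assignment failed: %s\n", tmq_err2str(code));
    return;
  }
  for (int32_t i = 0; i < numOfAssignment; i++) {
    code = tmq_offset_seek(tmq, topic, assignment[i].vgId, assignment[i].begin);
    if (code != 0) {
      printf("seek vgId %d failed: %s\n", assignment[i].vgId, tmq_err2str(code));
    }
  }
  tmq_free_assignment(assignment);

  while (1) {
    TAOS_RES *msg = tmq_consumer_poll(tmq, 1000);  // 1 s timeout
    if (msg == NULL) break;                        // nothing more to read for now
    printf("vgId %d, block offset %" PRId64 "\n",
           tmq_get_vgroup_id(msg), tmq_get_vgroup_offset(msg));
    tmq_commit_sync(tmq, msg);                     // commits this message's offset + 1
    taos_free_result(msg);
  }
}
```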
|
||||
|
||||
## Data Schema and API
|
||||
|
||||
|
@ -33,40 +46,59 @@ The related schemas and APIs in various languages are described as follows:
|
|||
<TabItem value="c" label="C">
|
||||
|
||||
```c
|
||||
typedef struct tmq_t tmq_t;
|
||||
typedef struct tmq_conf_t tmq_conf_t;
|
||||
typedef struct tmq_list_t tmq_list_t;
|
||||
typedef struct tmq_t tmq_t;
|
||||
typedef struct tmq_conf_t tmq_conf_t;
|
||||
typedef struct tmq_list_t tmq_list_t;
|
||||
|
||||
typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param));
|
||||
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
|
||||
|
||||
DLL_EXPORT tmq_list_t *tmq_list_new();
|
||||
DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
|
||||
DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
|
||||
DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
|
||||
DLL_EXPORT const char *tmq_err2str(int32_t code);
|
||||
typedef enum tmq_conf_res_t {
|
||||
TMQ_CONF_UNKNOWN = -2,
|
||||
TMQ_CONF_INVALID = -1,
|
||||
TMQ_CONF_OK = 0,
|
||||
} tmq_conf_res_t;
|
||||
|
||||
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
|
||||
DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
|
||||
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
|
||||
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
|
||||
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
|
||||
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
|
||||
typedef struct tmq_topic_assignment {
|
||||
int32_t vgId;
|
||||
int64_t currentOffset;
|
||||
int64_t begin;
|
||||
int64_t end; // The last version of wal + 1
|
||||
} tmq_topic_assignment;
|
||||
|
||||
enum tmq_conf_res_t {
|
||||
TMQ_CONF_UNKNOWN = -2,
|
||||
TMQ_CONF_INVALID = -1,
|
||||
TMQ_CONF_OK = 0,
|
||||
};
|
||||
typedef enum tmq_conf_res_t tmq_conf_res_t;
|
||||
DLL_EXPORT tmq_conf_t *tmq_conf_new();
|
||||
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
|
||||
DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
|
||||
DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
|
||||
|
||||
DLL_EXPORT tmq_conf_t *tmq_conf_new();
|
||||
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
|
||||
DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
|
||||
DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
|
||||
DLL_EXPORT tmq_list_t *tmq_list_new();
|
||||
DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
|
||||
DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
|
||||
DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *);
|
||||
DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *);
|
||||
|
||||
DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
|
||||
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
|
||||
DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
|
||||
DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
|
||||
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
|
||||
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
|
||||
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); //Commit the msg’s offset + 1
|
||||
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
|
||||
DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
||||
DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param);
|
||||
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment);
|
||||
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
|
||||
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
||||
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1
|
||||
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
|
||||
|
||||
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
|
||||
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
|
||||
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
|
||||
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); // Get current offset of the result
|
||||
DLL_EXPORT const char *tmq_err2str(int32_t code);
|
||||
```
|
||||
|
||||
For more information, see [C/C++ Connector](/reference/connector/cpp).
|
||||
|
||||
The following example is based on the smart meter table described in Data Models. For complete sample code, see the C language section below.
|
||||
|
||||
</TabItem>
|
||||
|
|
|
@ -4,7 +4,7 @@ sidebar_label: Access Control
|
|||
description: This document describes how to manage users and permissions in TDengine.
|
||||
---
|
||||
|
||||
This document describes how to manage permissions in TDengine.
|
||||
User and access control is a distinguishing feature of the TDengine Enterprise Edition. This section demonstrates only the most fundamental user and access control functionality. To learn more about user and access control, please contact the TDengine team.
|
||||
|
||||
## Create a User
|
||||
|
||||
|
|
|
@ -378,7 +378,7 @@ In addition to writing data using the SQL method or the parameter binding API, w
|
|||
- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)`
|
||||
|
||||
**Function description**
|
||||
This interface writes the text data of the line protocol to TDengine.
|
||||
- This interface writes the text data of the line protocol to TDengine.
|
||||
|
||||
**Parameter description**
|
||||
- taos: database connection, established by the `taos_connect()` function.
|
||||
|
@ -387,12 +387,13 @@ In addition to writing data using the SQL method or the parameter binding API, w
|
|||
- protocol: the protocol type of the lines, used to identify the text data format.
|
||||
- precision: precision string for the timestamp in the text data.
|
||||
|
||||
**return value**
|
||||
TAOS_RES structure, application can get error message by using `taos_errstr()` and also error code by using `taos_errno()`.
|
||||
**Return value**
|
||||
- TAOS_RES structure, application can get error message by using `taos_errstr()` and also error code by using `taos_errno()`.
|
||||
In some cases, the returned TAOS_RES is `NULL`, and it is still possible to call `taos_errno()` to safely get the error code information.
|
||||
The returned TAOS_RES needs to be freed by the caller in order to avoid memory leaks.
|
||||
|
||||
**Description**
|
||||
|
||||
The protocol type is enumerated and contains the following three formats.
|
||||
|
||||
- TSDB_SML_LINE_PROTOCOL: InfluxDB line protocol (Line Protocol)
|
||||
|
@ -427,3 +428,89 @@ In addition to writing data using the SQL method or the parameter binding API, w
|
|||
- The _raw interfaces pass the data through the parameters lines and len, which solves the problem of the original interface truncating data that contains '\0'. The totalRows pointer returns the number of parsed data rows.
- The _ttl interfaces accept a ttl parameter to control the TTL expiration time of the created tables.
- The _reqid interfaces accept a reqid parameter to track the entire call chain. A minimal schemaless write sketch is shown below.
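A minimal sketch of a schemaless write using the interface described above; the connection parameters and the sample line are placeholders, and the protocol/precision values are assumptions based on the protocol list in this section.

```c
#include <stdio.h>
#include "taos.h"

int main(void) {
  // Placeholder connection parameters; adjust to your environment.
  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
  if (taos == NULL) {
    printf("connect failed\n");
    return 1;
  }

  // One line of InfluxDB line protocol, as described above.
  const char *lines[] = {
      "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221,phase=0.28 1648432611249000000"};

  // TSDB_SML_LINE_PROTOCOL is the protocol enum named in this section;
  // the precision value 0 (not configured) is an assumption.
  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1, TSDB_SML_LINE_PROTOCOL, 0);
  if (taos_errno(res) != 0) {
    printf("schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);  // the returned TAOS_RES must be freed by the caller
  taos_close(taos);
  return 0;
}
```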
|
||||
|
||||
### Subscription API
|
||||
|
||||
- `int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment)`
|
||||
- `void tmq_free_assignment(tmq_topic_assignment* pAssignment)`
|
||||
|
||||
The tmq_topic_assignment structure is defined as follows:
|
||||
```c
|
||||
typedef struct tmq_topic_assignment {
|
||||
int32_t vgId;
|
||||
int64_t currentOffset;
|
||||
int64_t begin;
|
||||
int64_t end;
|
||||
} tmq_topic_assignment;
|
||||
```
|
||||
**Function description**
- tmq_get_topic_assignment obtains the vgroup information currently assigned to this consumer (see the sketch after this block).

**Parameter description**
- numOfAssignment: the number of vgroups assigned to this consumer
- assignment: the vgroup information; it must be freed by tmq_free_assignment

**Return value**
- Zero on success; non-zero on failure. The error message can be obtained through `char *tmq_err2str(int32_t code)`.
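A minimal usage sketch of these two calls, assuming an existing consumer `tmq` and a placeholder topic name:

```c
#include <inttypes.h>
#include <stdio.h>
#include "taos.h"

// Sketch: print the vgroups currently assigned to a consumer for one topic.
static void print_assignment(tmq_t *tmq, const char *topic) {
  tmq_topic_assignment *assignment = NULL;
  int32_t numOfAssignment = 0;
  int32_t code = tmq_get_topic_assignment(tmq, topic, &assignment, &numOfAssignment);
  if (code != 0) {
    printf("get assignment failed: %s\n", tmq_err2str(code));
    return;
  }
  for (int32_t i = 0; i < numOfAssignment; i++) {
    printf("vgId=%d current=%" PRId64 " begin=%" PRId64 " end=%" PRId64 "\n",
           assignment[i].vgId, assignment[i].currentOffset,
           assignment[i].begin, assignment[i].end);
  }
  tmq_free_assignment(assignment);  // the assignment array must be freed by the caller
}
```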
|
||||
|
||||
- `int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
|
||||
**Function description**
- Obtains the committed offset for the given topic and vgroup.

**Return value**
- The committed offset. A value of -2147467247 means that no offset has been committed; other values less than 0 indicate failure, and the return value is the error code.
|
||||
|
||||
- `int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg)`
|
||||
- `void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param)`
|
||||
- `int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
|
||||
- `void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param)`
|
||||
|
||||
**Function description**

The commit interfaces come in two types, each with a synchronous and an asynchronous variant:
- The first type commits based on a message and commits the progress contained in that message. If msg is NULL, the current progress of all vgroups consumed by this consumer is committed: tmq_commit_sync/tmq_commit_async
- The second type commits based on the offset of a specific vgroup in a topic: tmq_commit_offset_sync/tmq_commit_offset_async (see the sketch after this block)

**Parameter description**
- msg: the consumed message. If msg is NULL, the current progress of all vgroups consumed by this consumer is committed.

**Return value**
- Zero on success; non-zero on failure. The error message can be obtained through `char *tmq_err2str(int32_t code)`.
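A minimal sketch of the second (offset-based) form, assuming an existing consumer `tmq`; the topic name and vgroup ID are placeholders that would normally come from tmq_get_topic_assignment or from a polled message, and tmq_position (described below) supplies the offset to commit.

```c
#include <stdio.h>
#include "taos.h"

// Sketch: explicitly commit the consumer's current position for one vgroup.
static void commit_current_position(tmq_t *tmq, const char *topic, int32_t vgId) {
  int64_t pos = tmq_position(tmq, topic, vgId);   // next position to be consumed
  if (pos < 0) {
    printf("position failed: %s\n", tmq_err2str((int32_t)pos));
    return;
  }
  int32_t code = tmq_commit_offset_sync(tmq, topic, vgId, pos);
  if (code != 0) {
    printf("commit failed: %s\n", tmq_err2str(code));
  }
}
```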
|
||||
|
||||
- `int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
|
||||
|
||||
**Function description**
- Obtains the current consumption position, which is the position immediately after the last consumed data. A sketch comparing it with the committed offset follows.

**Return value**
- The current consumption position on success; a value less than 0 indicates failure, and the error message can be obtained through `char *tmq_err2str(int32_t code)`.
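A small sketch, assuming an existing consumer `tmq` and placeholder topic/vgroup arguments, that compares the current position with the committed offset for one vgroup:

```c
#include <inttypes.h>
#include <stdio.h>
#include "taos.h"

// Sketch: report how far the committed offset lags behind the current position.
static void report_lag(tmq_t *tmq, const char *topic, int32_t vgId) {
  int64_t pos = tmq_position(tmq, topic, vgId);
  int64_t committed = tmq_committed(tmq, topic, vgId);
  if (pos >= 0 && committed >= 0) {
    printf("vgId=%d position=%" PRId64 " committed=%" PRId64 " uncommitted=%" PRId64 "\n",
           vgId, pos, committed, pos - committed);
  }
}
```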
|
||||
|
||||
- `int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
|
||||
|
||||
**Function description**
- Sets the offset of the consumer in a vgroup of a certain topic, from which consumption then starts.

**Return value**
- Zero on success; non-zero on failure. The error message can be obtained through `char *tmq_err2str(int32_t code)`.
|
||||
|
||||
- `int64_t tmq_get_vgroup_offset(TAOS_RES* res)`
|
||||
|
||||
**Function description**
- Obtains the starting offset of the consumed data.

**Parameter description**
- msg: the consumed message

**Return value**
- The starting offset of the consumed data; a value less than 0 indicates failure, and the error message can be obtained through `char *tmq_err2str(int32_t code)`.
|
||||
|
||||
- `int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics)`
|
||||
|
||||
**Function description**
- Obtains the list of topics the consumer is subscribed to (see the sketch after this block).

**Parameter description**
- topics: the list of topics the consumer is subscribed to; it must be freed by tmq_list_destroy

**Return value**
- Zero on success; non-zero on failure. The error message can be obtained through `char *tmq_err2str(int32_t code)`.
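A minimal sketch, assuming an existing consumer `tmq`, that prints the subscribed topics using the list helpers shown in the API block above:

```c
#include <stdio.h>
#include "taos.h"

// Sketch: print the topics this consumer is currently subscribed to.
static void print_subscription(tmq_t *tmq) {
  tmq_list_t *topics = NULL;
  int32_t code = tmq_subscription(tmq, &topics);
  if (code != 0) {
    printf("subscription failed: %s\n", tmq_err2str(code));
    return;
  }
  int32_t n = tmq_list_get_size(topics);
  char **names = tmq_list_to_c_array(topics);
  for (int32_t i = 0; i < n; i++) {
    printf("subscribed topic: %s\n", names[i]);
  }
  tmq_list_destroy(topics);  // frees the list returned by tmq_subscription
}
```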
|
|
@ -1091,8 +1091,6 @@ public abstract class ConsumerLoop {
|
|||
config.setProperty("client.id", "1");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||
config.setProperty("experimental.snapshot.enable", "true");
|
||||
|
||||
|
||||
this.consumer = new TaosConsumer<>(config);
|
||||
this.topics = Collections.singletonList("topic_speed");
|
||||
|
@ -1176,7 +1174,6 @@ public abstract class ConsumerLoop {
|
|||
config.setProperty("client.id", "1");
|
||||
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
|
||||
config.setProperty("value.deserializer.encoding", "UTF-8");
|
||||
config.setProperty("experimental.snapshot.enable", "true");
|
||||
|
||||
this.consumer = new TaosConsumer<>(config);
|
||||
this.topics = Collections.singletonList("topic_speed");
|
||||
|
|
|
@ -81,6 +81,14 @@ For example:
|
|||
taos -h h1.taos.com -s "use db; show tables;"
|
||||
```
|
||||
|
||||
## Export query results to a file
|
||||
|
||||
>" to export">
- You can use ">>" to export query results to a file; the syntax is `select * from table >> file`. If only a file name is given without a path, the file is generated under the current working directory of the TDengine CLI.

## Import data from CSV file

- You can use `insert into table_name file 'fileName'` to import data from the specified file into the specified table. For example, `insert into d0 file '/root/d0.csv';` imports the data in file "/root/d0.csv" into table "d0". If only a file name is given without a path, the file is assumed to be under the current working directory of the TDengine CLI.
|
||||
|
||||
## TDengine CLI tips
|
||||
|
||||
- You can use the up and down keys to iterate the history of commands entered
|
||||
|
@ -89,3 +97,5 @@ taos -h h1.taos.com -s "use db; show tables;"
|
|||
- Execute `RESET QUERY CACHE` to clear the local cache of the table schema
|
||||
- Execute SQL statements in batches. You can store a series of shell commands (ending with ;, one line for each SQL command) in a script file and execute the command `source <file-name>` in the TDengine CLI to execute all SQL commands in that file automatically
|
||||
- Enter `q` to exit TDengine CLI
|
||||
|
||||
|
||||
|
|
|
@ -227,12 +227,6 @@ tmq_t* build_consumer() {
|
|||
return NULL;
|
||||
}
|
||||
|
||||
code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
|
||||
if (TMQ_CONF_OK != code) {
|
||||
tmq_conf_destroy(conf);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||
|
||||
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
|
||||
|
|
|
@ -35,7 +35,6 @@ func main() {
|
|||
"td.connect.port": "6030",
|
||||
"client.id": "test_tmq_client",
|
||||
"enable.auto.commit": "false",
|
||||
"experimental.snapshot.enable": "true",
|
||||
"msg.with.table.name": "true",
|
||||
})
|
||||
if err != nil {
|
||||
|
|
|
@ -25,7 +25,20 @@ import CDemo from "./_sub_c.mdx";
|
|||
|
||||
This document does not cover the basics of message queues themselves; if you need that background, please look it up separately.

Note: Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. Therefore, set reasonable values for `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database, and make sure the application consumes the data in a timely way so that no data is lost. The behavior of data subscription is similar to that of Kafka and other widely used message queue products.
Notes (using the C interface as an example):
1. A consumer group consumes all data under the same topic, and different consumer groups are independent of each other;
2. A consumer group consumes all vgroups of the same topic and can be composed of multiple consumers, but each vgroup is consumed by only one consumer; if the number of consumers exceeds the number of vgroups, the excess consumers do not consume data;
3. On the server side, only one offset is saved for each vgroup; the offsets of a vgroup increase monotonically but are not necessarily continuous, and there is no correlation between the offsets of different vgroups;
4. Each poll returns a result block that belongs to one vgroup and may contain data from multiple WAL versions; the offset of the first record in the block can be obtained through the tmq_get_vgroup_offset interface;
5. If a consumer group has never committed an offset, its member consumers start consuming from the value of the parameter auto.offset.reset when they restart and pull data again; within a consumer's lifecycle, the client locally records the offset of the most recently pulled data and does not pull duplicate data;
6. If a consumer terminates abnormally (without calling tmq_close), a rebalance of its consumer group is triggered after about 12 seconds; the consumer's status on the server changes to LOST, and after about 1 day the consumer is deleted automatically. If it exits normally, it is deleted right after exiting. If a new consumer is added, a rebalance is triggered after about 2 seconds, and the new consumer's status on the server changes to ready;
7. A consumer group rebalance reassigns vgroups to all consumer members in the ready state of the group; a consumer can only perform assignment/seek/commit/poll operations on the vgroups it is responsible for;
8. A consumer can call tmq_position to obtain the offset of the current consumption, seek to a specified offset, and consume again;
9. Seek points the position to the specified offset without performing a commit; once the seek succeeds, the consumer can poll the specified offset and subsequent data;
10. Before a seek operation, tmq_get_topic_assignment must be called to obtain the consumer's vgroup IDs and offset ranges; the seek operation checks whether the vgroup ID and offset are valid and reports an error if they are not;
11. Because of the WAL expiration and deletion mechanism, the offset may have expired by the time data is polled even if the seek succeeded; if the polled offset is less than the minimum WAL version number, consumption starts from the minimum WAL version number;
12. The tmq_get_vgroup_offset interface obtains the offset of the first record in the result block that contains the record; when seeking to this offset, all data in this block is consumed. See point 4;
13. Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. Therefore, set reasonable values for `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database, and make sure the application consumes the data in a timely way so that no data is lost. The behavior of data subscription is similar to that of Kafka and other widely used message queue products;

## Data Schema and API
|
||||
|
||||
|
@ -35,39 +48,60 @@ import CDemo from "./_sub_c.mdx";
|
|||
<TabItem value="c" label="C">
|
||||
|
||||
```c
|
||||
typedef struct tmq_t tmq_t;
|
||||
typedef struct tmq_conf_t tmq_conf_t;
|
||||
typedef struct tmq_list_t tmq_list_t;
|
||||
typedef struct tmq_t tmq_t;
|
||||
typedef struct tmq_conf_t tmq_conf_t;
|
||||
typedef struct tmq_list_t tmq_list_t;
|
||||
|
||||
typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param));
|
||||
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
|
||||
|
||||
DLL_EXPORT tmq_list_t *tmq_list_new();
|
||||
DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
|
||||
DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
|
||||
DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
|
||||
DLL_EXPORT const char *tmq_err2str(int32_t code);
|
||||
typedef enum tmq_conf_res_t {
|
||||
TMQ_CONF_UNKNOWN = -2,
|
||||
TMQ_CONF_INVALID = -1,
|
||||
TMQ_CONF_OK = 0,
|
||||
} tmq_conf_res_t;
|
||||
|
||||
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
|
||||
DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
|
||||
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
|
||||
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
|
||||
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
|
||||
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
|
||||
typedef struct tmq_topic_assignment {
|
||||
int32_t vgId;
|
||||
int64_t currentOffset;
|
||||
int64_t begin;
|
||||
int64_t end;
|
||||
} tmq_topic_assignment;
|
||||
|
||||
enum tmq_conf_res_t {
|
||||
TMQ_CONF_UNKNOWN = -2,
|
||||
TMQ_CONF_INVALID = -1,
|
||||
TMQ_CONF_OK = 0,
|
||||
};
|
||||
typedef enum tmq_conf_res_t tmq_conf_res_t;
|
||||
DLL_EXPORT tmq_conf_t *tmq_conf_new();
|
||||
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
|
||||
DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
|
||||
DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
|
||||
|
||||
DLL_EXPORT tmq_conf_t *tmq_conf_new();
|
||||
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
|
||||
DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
|
||||
DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
|
||||
DLL_EXPORT tmq_list_t *tmq_list_new();
|
||||
DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
|
||||
DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
|
||||
DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *);
|
||||
DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *);
|
||||
|
||||
DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
|
||||
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
|
||||
DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
|
||||
DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
|
||||
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
|
||||
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
|
||||
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
|
||||
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
|
||||
DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
||||
DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param);
|
||||
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment);
|
||||
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
|
||||
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
||||
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);
|
||||
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
|
||||
|
||||
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
|
||||
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
|
||||
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
|
||||
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
|
||||
DLL_EXPORT const char *tmq_err2str(int32_t code);
|
||||
```
|
||||
|
||||
The documentation for these APIs can be found in [C/C++ Connector](../../connector/cpp). Their specific usage is described below (for the supertable and subtable structures, see the "Data Model" section); for complete sample code, see the C example below.
Their specific usage is described below (for the supertable and subtable structures, see the "Data Model" section); for complete sample code, see the C example below.
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="java" label="Java">
|
||||
|
|
|
@ -467,21 +467,22 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
|
|||
- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)`
|
||||
|
||||
**功能说明**
|
||||
该接口将行协议的文本数据写入到 TDengine 中。
|
||||
- 该接口将行协议的文本数据写入到 TDengine 中。
|
||||
|
||||
**参数说明**
|
||||
taos: 数据库连接,通过 `taos_connect()` 函数建立的数据库连接。
|
||||
lines:文本数据。满足解析格式要求的无模式文本字符串。
|
||||
numLines:文本数据的行数,不能为 0 。
|
||||
protocol: 行协议类型,用于标识文本数据格式。
|
||||
precision:文本数据中的时间戳精度字符串。
|
||||
- taos: 数据库连接,通过 `taos_connect()` 函数建立的数据库连接。
|
||||
- lines:文本数据。满足解析格式要求的无模式文本字符串。
|
||||
- numLines:文本数据的行数,不能为 0 。
|
||||
- protocol: 行协议类型,用于标识文本数据格式。
|
||||
- precision:文本数据中的时间戳精度字符串。
|
||||
|
||||
**返回值**
|
||||
TAOS_RES 结构体,应用可以通过使用 `taos_errstr()` 获得错误信息,也可以使用 `taos_errno()` 获得错误码。
|
||||
- TAOS_RES 结构体,应用可以通过使用 `taos_errstr()` 获得错误信息,也可以使用 `taos_errno()` 获得错误码。
|
||||
在某些情况下,返回的 TAOS_RES 为 `NULL`,此时仍然可以调用 `taos_errno()` 来安全地获得错误码信息。
|
||||
返回的 TAOS_RES 需要调用方来负责释放,否则会出现内存泄漏。
|
||||
|
||||
**说明**
|
||||
|
||||
协议类型是枚举类型,包含以下三种格式:
|
||||
|
||||
- TSDB_SML_LINE_PROTOCOL:InfluxDB 行协议(Line Protocol)
|
||||
|
@ -515,3 +516,90 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
|
|||
- 带_raw的接口通过传递的参数lines指针和长度len来表示数据,为了解决原始接口数据包含'\0'而被截断的问题。totalRows指针返回解析出来的数据行数。
|
||||
- 带_ttl的接口可以传递ttl参数来控制建表的ttl到期时间。
|
||||
- 带_reqid的接口可以通过传递reqid参数来追踪整个的调用链。
|
||||
|
||||
### 数据订阅 API
|
||||
|
||||
- `int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment)`
|
||||
- `void tmq_free_assignment(tmq_topic_assignment* pAssignment)`
|
||||
|
||||
tmq_topic_assignment结构体定义如下:
|
||||
```c
|
||||
typedef struct tmq_topic_assignment {
|
||||
int32_t vgId;
|
||||
int64_t currentOffset;
|
||||
int64_t begin;
|
||||
int64_t end;
|
||||
} tmq_topic_assignment;
|
||||
```
|
||||
**功能说明**
|
||||
- tmq_get_topic_assignment 接口返回当前consumer分配的vgroup的信息,每个vgroup的信息包括vgId,wal的最大最小offset,以及当前消费到的offset。
|
||||
|
||||
**参数说明**
|
||||
- numOfAssignment :分配给该consumer有效的vgroup个数。
|
||||
- assignment :分配的信息,数据大小为numOfAssignment,需要通过 tmq_free_assignment 接口释放。
|
||||
|
||||
**返回值**
|
||||
- 错误码,0成功,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息。
|
||||
|
||||
- `int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
|
||||
**功能说明**
|
||||
- 获取当前 consumer 在某个 topic 和 vgroup上的 commit 位置。
|
||||
|
||||
**返回值**
|
||||
- 当前commit的位置,-2147467247表示没有消费进度,其他小于0的值表示失败,错误码就是返回值
|
||||
|
||||
- `int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg)`
|
||||
- `void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param)`
|
||||
- `int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
|
||||
- `void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param)`
|
||||
|
||||
**功能说明**
|
||||
|
||||
commit接口分为两种类型,每种类型有同步和异步接口:
|
||||
- 第一种类型:根据消息提交,提交消息里的进度,如果消息传NULL,提交当前consumer所有消费的vgroup的当前进度 : tmq_commit_sync/tmq_commit_async
|
||||
- 第二种类型:根据某个topic的某个vgroup的offset提交 : tmq_commit_offset_sync/tmq_commit_offset_async
|
||||
|
||||
**参数说明**
|
||||
- msg:消费到的消息结构,如果msg传NULL,提交当前consumer所有消费的vgroup的当前进度
|
||||
|
||||
**返回值**
|
||||
- 错误码,0成功,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息
|
||||
|
||||
- `int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
|
||||
|
||||
**功能说明**
|
||||
- 获取当前消费位置,为消费到的数据位置的下一个位置
|
||||
|
||||
**返回值**
|
||||
- 消费位置,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息
|
||||
|
||||
- `int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
|
||||
|
||||
**功能说明**
|
||||
- 设置 consumer 在某个topic的某个vgroup的 offset位置,开始消费
|
||||
|
||||
**返回值**
|
||||
- 错误码,0成功,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息
|
||||
|
||||
- `int32_t int64_t tmq_get_vgroup_offset(TAOS_RES* res)`
|
||||
|
||||
**功能说明**
|
||||
|
||||
获取 poll 消费到的数据的起始offset
|
||||
|
||||
**参数说明**
|
||||
- msg:消费到的消息结构
|
||||
|
||||
**返回值**
|
||||
- 消费到的offset,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息
|
||||
|
||||
- `int32_t int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics)`
|
||||
|
||||
**功能说明**
|
||||
|
||||
获取消费者订阅的 topic 列表
|
||||
**参数说明**
|
||||
- topics: 获取的 topic 列表存储在这个结构中,接口内分配内存,需调用tmq_list_destroy释放
|
||||
|
||||
**返回值**
|
||||
- 错误码,0成功,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息
|
|
@ -249,3 +249,12 @@ T = 最新事件时间 - DELETE_MARK
|
|||
- [unique](../function/#unique)
|
||||
- [mode](../function/#mode)
|
||||
|
||||
## Pause and Resume Stream Processing

1. Pause a stream processing task
PAUSE STREAM [IF EXISTS] stream_name;
If IF EXISTS is not specified, an error is reported when the stream does not exist; if the stream exists, it is paused. If IF EXISTS is specified, success is returned when the stream does not exist; if the stream exists, it is paused.

2. Resume a stream processing task
RESUME STREAM [IF EXISTS] [IGNORE UNTREATED] stream_name;
If IF EXISTS is not specified, an error is reported when the stream does not exist; if the stream exists, it is resumed. If IF EXISTS is specified, success is returned when the stream does not exist; if the stream exists, it is resumed. If IGNORE UNTREATED is specified, data written while the stream was paused is ignored when the stream resumes. A sketch of issuing these statements from the C client follows.
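A minimal sketch of issuing these statements from the C client, assuming an existing connection `taos` and a stream named `current_stream` (both placeholders):

```c
#include <stdio.h>
#include "taos.h"

// Sketch: pause a stream, then resume it while discarding data written during the pause.
static void pause_and_resume(TAOS *taos) {
  TAOS_RES *res = taos_query(taos, "PAUSE STREAM IF EXISTS current_stream;");
  if (taos_errno(res) != 0) {
    printf("pause failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);

  res = taos_query(taos, "RESUME STREAM IF EXISTS IGNORE UNTREATED current_stream;");
  if (taos_errno(res) != 0) {
    printf("resume failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
}
```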
|
||||
|
||||
|
|
|
@ -4,7 +4,7 @@ title: 权限管理
|
|||
description: 企业版中才具有的权限管理功能
|
||||
---
|
||||
|
||||
This section describes how to perform permission management operations in TDengine.
This section describes how to perform permission management operations in TDengine. Permission management is a feature specific to the TDengine Enterprise Edition. This section lists only some basic permission management functionality as examples; for richer permission management, please contact the TDengine sales or marketing team.

## Create a User
|
||||
|
||||
|
|
|
@ -89,3 +89,11 @@ taos -h h1.taos.com -s "use db; show tables;"
|
|||
- Execute `RESET QUERY CACHE` to clear the local cache of table schemas
- Execute SQL statements in batches. You can store a series of TDengine CLI commands (each SQL statement on its own line, ending with ;) in a file and execute the command `source <file-name>` in the TDengine CLI to run all SQL statements in that file automatically
- Enter `q`, `quit`, or `exit` and press Enter to exit the TDengine CLI

## Export query results to a file with the TDengine CLI

- You can use ">>" to export query results to a file; the syntax is: sql-query >> 'output file name'; if the output file name is given without a path, the file is written to the current directory. For example, select * from d0 >> '/root/d0.csv'; writes the query results to /root/d0.csv.

## Import data from a file into a table with the TDengine CLI

- You can use insert into table_name file 'input file name' to import the data exported in the previous step back into a specified table. For example, insert into d0 file '/root/d0.csv'; imports all of the exported data into table d0.
|
||||
|
|
|
@ -228,11 +228,6 @@ tmq_t* build_consumer() {
|
|||
tmq_conf_destroy(conf);
|
||||
return NULL;
|
||||
}
|
||||
code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
|
||||
if (TMQ_CONF_OK != code) {
|
||||
tmq_conf_destroy(conf);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
|
||||
tmq = tmq_consumer_new(conf, NULL, 0);
|
||||
|
|
|
@ -260,19 +260,14 @@ typedef struct tmq_t tmq_t;
|
|||
typedef struct tmq_conf_t tmq_conf_t;
|
||||
typedef struct tmq_list_t tmq_list_t;
|
||||
|
||||
typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param));
|
||||
typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
|
||||
|
||||
DLL_EXPORT tmq_list_t *tmq_list_new();
|
||||
DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
|
||||
DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
|
||||
DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *);
|
||||
DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *);
|
||||
typedef enum tmq_conf_res_t {
|
||||
TMQ_CONF_UNKNOWN = -2,
|
||||
TMQ_CONF_INVALID = -1,
|
||||
TMQ_CONF_OK = 0,
|
||||
} tmq_conf_res_t;
|
||||
|
||||
DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
|
||||
|
||||
DLL_EXPORT const char *tmq_err2str(int32_t code);
|
||||
|
||||
/* ------------------------TMQ CONSUMER INTERFACE------------------------ */
|
||||
typedef struct tmq_topic_assignment {
|
||||
int32_t vgId;
|
||||
int64_t currentOffset;
|
||||
|
@ -280,43 +275,38 @@ typedef struct tmq_topic_assignment {
|
|||
int64_t end;
|
||||
} tmq_topic_assignment;
|
||||
|
||||
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
|
||||
DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
|
||||
DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
|
||||
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
|
||||
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
|
||||
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg);
|
||||
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
|
||||
DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
||||
DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param);
|
||||
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,
|
||||
int32_t *numOfAssignment);
|
||||
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
|
||||
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
||||
|
||||
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
|
||||
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
|
||||
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
|
||||
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
|
||||
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);
|
||||
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
|
||||
|
||||
/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */
|
||||
|
||||
enum tmq_conf_res_t {
|
||||
TMQ_CONF_UNKNOWN = -2,
|
||||
TMQ_CONF_INVALID = -1,
|
||||
TMQ_CONF_OK = 0,
|
||||
};
|
||||
|
||||
typedef enum tmq_conf_res_t tmq_conf_res_t;
|
||||
|
||||
DLL_EXPORT tmq_conf_t *tmq_conf_new();
|
||||
DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value);
|
||||
DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
|
||||
DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
|
||||
|
||||
/* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */
|
||||
DLL_EXPORT tmq_list_t *tmq_list_new();
|
||||
DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *);
|
||||
DLL_EXPORT void tmq_list_destroy(tmq_list_t *);
|
||||
DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *);
|
||||
DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *);
|
||||
|
||||
DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen);
|
||||
DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list);
|
||||
DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq);
|
||||
DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics);
|
||||
DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout);
|
||||
DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq);
|
||||
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); //Commit the msg’s offset + 1
|
||||
DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param);
|
||||
DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
||||
DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param);
|
||||
DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment);
|
||||
DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment);
|
||||
DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
|
||||
DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1
|
||||
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
|
||||
|
||||
DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
|
||||
DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
|
||||
DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res);
|
||||
DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res);
|
||||
DLL_EXPORT const char *tmq_err2str(int32_t code);
|
||||
|
||||
/* ------------------------------ TAOSX -----------------------------------*/
|
||||
// note: following apis are unstable
|
||||
|
|
|
@ -152,6 +152,8 @@ enum {
|
|||
STREAM_INPUT__DATA_RETRIEVE,
|
||||
STREAM_INPUT__GET_RES,
|
||||
STREAM_INPUT__CHECKPOINT,
|
||||
STREAM_INPUT__CHECKPOINT_TRIGGER,
|
||||
STREAM_INPUT__TRANS_STATE,
|
||||
STREAM_INPUT__REF_DATA_BLOCK,
|
||||
STREAM_INPUT__DESTROY,
|
||||
};
|
||||
|
@ -168,7 +170,9 @@ typedef enum EStreamType {
|
|||
STREAM_PULL_DATA,
|
||||
STREAM_PULL_OVER,
|
||||
STREAM_FILL_OVER,
|
||||
STREAM_CHECKPOINT,
|
||||
STREAM_CREATE_CHILD_TABLE,
|
||||
STREAM_TRANS_STATE,
|
||||
} EStreamType;
|
||||
|
||||
#pragma pack(push, 1)
|
||||
|
|
|
@ -241,7 +241,7 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols);
|
|||
const char* blockDecode(SSDataBlock* pBlock, const char* pData);
|
||||
|
||||
// for debug
|
||||
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);
|
||||
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf, const char* taskIdStr);
|
||||
|
||||
int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pDataBlocks, const STSchema* pTSchema, int64_t uid, int32_t vgId,
|
||||
tb_uid_t suid);
|
||||
|
|
|
@ -199,6 +199,7 @@ extern bool tsFilterScalarMode;
|
|||
extern int32_t tsKeepTimeOffset;
|
||||
extern int32_t tsMaxStreamBackendCache;
|
||||
extern int32_t tsPQSortMemThreshold;
|
||||
extern int32_t tsResolveFQDNRetryTime;
|
||||
|
||||
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
|
||||
|
||||
|
|
|
@ -231,6 +231,7 @@ typedef struct SField {
|
|||
uint8_t type;
|
||||
int8_t flags;
|
||||
int32_t bytes;
|
||||
char comment[TSDB_COL_COMMENT_LEN];
|
||||
} SField;
|
||||
|
||||
typedef struct SRetention {
|
||||
|
@ -309,6 +310,7 @@ struct SSchema {
|
|||
col_id_t colId;
|
||||
int32_t bytes;
|
||||
char name[TSDB_COL_NAME_LEN];
|
||||
char comment[TSDB_COL_COMMENT_LEN];
|
||||
};
|
||||
|
||||
struct SSchema2 {
|
||||
|
@ -2390,6 +2392,9 @@ typedef struct {
|
|||
int8_t type;
|
||||
int8_t flags;
|
||||
int32_t bytes;
|
||||
bool hasColComment;
|
||||
char* colComment;
|
||||
int32_t colCommentLen;
|
||||
// TSDB_ALTER_TABLE_DROP_COLUMN
|
||||
// TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES
|
||||
int8_t colModType;
|
||||
|
|
|
@ -254,7 +254,6 @@ enum {
|
|||
TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_STREAM_SCAN_HISTORY, "stream-scan-history", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_STREAM_SCAN_HISTORY_FINISH, "stream-scan-history-finish", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_STREAM_TRANSFER_STATE, "stream-transfer-state", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECK, "stream-task-check", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECKPOINT, "stream-checkpoint", NULL, NULL)
|
||||
TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_REPORT_CHECKPOINT, "stream-report-checkpoint", NULL, NULL)
|
||||
|
|
|
@ -16,105 +16,105 @@
|
|||
#ifndef _TD_COMMON_TOKEN_H_
|
||||
#define _TD_COMMON_TOKEN_H_
|
||||
|
||||
#define TK_OR 1
|
||||
#define TK_AND 2
|
||||
#define TK_UNION 3
|
||||
#define TK_ALL 4
|
||||
#define TK_MINUS 5
|
||||
#define TK_EXCEPT 6
|
||||
#define TK_INTERSECT 7
|
||||
#define TK_NK_BITAND 8
|
||||
#define TK_NK_BITOR 9
|
||||
#define TK_NK_LSHIFT 10
|
||||
#define TK_NK_RSHIFT 11
|
||||
#define TK_NK_PLUS 12
|
||||
#define TK_NK_MINUS 13
|
||||
#define TK_NK_STAR 14
|
||||
#define TK_NK_SLASH 15
|
||||
#define TK_NK_REM 16
|
||||
#define TK_NK_CONCAT 17
|
||||
#define TK_CREATE 18
|
||||
#define TK_ACCOUNT 19
|
||||
#define TK_NK_ID 20
|
||||
#define TK_PASS 21
|
||||
#define TK_NK_STRING 22
|
||||
#define TK_ALTER 23
|
||||
#define TK_PPS 24
|
||||
#define TK_TSERIES 25
|
||||
#define TK_STORAGE 26
|
||||
#define TK_STREAMS 27
|
||||
#define TK_QTIME 28
|
||||
#define TK_DBS 29
|
||||
#define TK_USERS 30
|
||||
#define TK_CONNS 31
|
||||
#define TK_STATE 32
|
||||
#define TK_USER 33
|
||||
#define TK_ENABLE 34
|
||||
#define TK_NK_INTEGER 35
|
||||
#define TK_SYSINFO 36
|
||||
#define TK_DROP 37
|
||||
#define TK_GRANT 38
|
||||
#define TK_ON 39
|
||||
#define TK_TO 40
|
||||
#define TK_REVOKE 41
|
||||
#define TK_FROM 42
|
||||
#define TK_SUBSCRIBE 43
|
||||
#define TK_NK_COMMA 44
|
||||
#define TK_READ 45
|
||||
#define TK_WRITE 46
|
||||
#define TK_NK_DOT 47
|
||||
#define TK_WITH 48
|
||||
#define TK_DNODE 49
|
||||
#define TK_PORT 50
|
||||
#define TK_DNODES 51
|
||||
#define TK_RESTORE 52
|
||||
#define TK_NK_IPTOKEN 53
|
||||
#define TK_FORCE 54
|
||||
#define TK_UNSAFE 55
|
||||
#define TK_LOCAL 56
|
||||
#define TK_QNODE 57
|
||||
#define TK_BNODE 58
|
||||
#define TK_SNODE 59
|
||||
#define TK_MNODE 60
|
||||
#define TK_VNODE 61
|
||||
#define TK_DATABASE 62
|
||||
#define TK_USE 63
|
||||
#define TK_FLUSH 64
|
||||
#define TK_TRIM 65
|
||||
#define TK_COMPACT 66
|
||||
#define TK_IF 67
|
||||
#define TK_NOT 68
|
||||
#define TK_EXISTS 69
|
||||
#define TK_BUFFER 70
|
||||
#define TK_CACHEMODEL 71
|
||||
#define TK_CACHESIZE 72
|
||||
#define TK_COMP 73
|
||||
#define TK_DURATION 74
|
||||
#define TK_NK_VARIABLE 75
|
||||
#define TK_MAXROWS 76
|
||||
#define TK_MINROWS 77
|
||||
#define TK_KEEP 78
|
||||
#define TK_PAGES 79
|
||||
#define TK_PAGESIZE 80
|
||||
#define TK_TSDB_PAGESIZE 81
|
||||
#define TK_PRECISION 82
|
||||
#define TK_REPLICA 83
|
||||
#define TK_VGROUPS 84
|
||||
#define TK_SINGLE_STABLE 85
|
||||
#define TK_RETENTIONS 86
|
||||
#define TK_SCHEMALESS 87
|
||||
#define TK_WAL_LEVEL 88
|
||||
#define TK_WAL_FSYNC_PERIOD 89
|
||||
#define TK_WAL_RETENTION_PERIOD 90
|
||||
#define TK_WAL_RETENTION_SIZE 91
|
||||
#define TK_WAL_ROLL_PERIOD 92
|
||||
#define TK_WAL_SEGMENT_SIZE 93
|
||||
#define TK_STT_TRIGGER 94
|
||||
#define TK_TABLE_PREFIX 95
|
||||
#define TK_TABLE_SUFFIX 96
|
||||
#define TK_NK_COLON 97
|
||||
#define TK_MAX_SPEED 98
|
||||
#define TK_START 99
|
||||
#define TK_OR 1
#define TK_AND 2
#define TK_UNION 3
#define TK_ALL 4
#define TK_MINUS 5
#define TK_EXCEPT 6
#define TK_INTERSECT 7
#define TK_NK_BITAND 8
#define TK_NK_BITOR 9
#define TK_NK_LSHIFT 10
#define TK_NK_RSHIFT 11
#define TK_NK_PLUS 12
#define TK_NK_MINUS 13
#define TK_NK_STAR 14
#define TK_NK_SLASH 15
#define TK_NK_REM 16
#define TK_NK_CONCAT 17
#define TK_CREATE 18
#define TK_ACCOUNT 19
#define TK_NK_ID 20
#define TK_PASS 21
#define TK_NK_STRING 22
#define TK_ALTER 23
#define TK_PPS 24
#define TK_TSERIES 25
#define TK_STORAGE 26
#define TK_STREAMS 27
#define TK_QTIME 28
#define TK_DBS 29
#define TK_USERS 30
#define TK_CONNS 31
#define TK_STATE 32
#define TK_USER 33
#define TK_ENABLE 34
#define TK_NK_INTEGER 35
#define TK_SYSINFO 36
#define TK_DROP 37
#define TK_GRANT 38
#define TK_ON 39
#define TK_TO 40
#define TK_REVOKE 41
#define TK_FROM 42
#define TK_SUBSCRIBE 43
#define TK_NK_COMMA 44
#define TK_READ 45
#define TK_WRITE 46
#define TK_NK_DOT 47
#define TK_WITH 48
#define TK_DNODE 49
#define TK_PORT 50
#define TK_DNODES 51
#define TK_RESTORE 52
#define TK_NK_IPTOKEN 53
#define TK_FORCE 54
#define TK_UNSAFE 55
#define TK_LOCAL 56
#define TK_QNODE 57
#define TK_BNODE 58
#define TK_SNODE 59
#define TK_MNODE 60
#define TK_VNODE 61
#define TK_DATABASE 62
#define TK_USE 63
#define TK_FLUSH 64
#define TK_TRIM 65
#define TK_COMPACT 66
#define TK_IF 67
#define TK_NOT 68
#define TK_EXISTS 69
#define TK_BUFFER 70
#define TK_CACHEMODEL 71
#define TK_CACHESIZE 72
#define TK_COMP 73
#define TK_DURATION 74
#define TK_NK_VARIABLE 75
#define TK_MAXROWS 76
#define TK_MINROWS 77
#define TK_KEEP 78
#define TK_PAGES 79
#define TK_PAGESIZE 80
#define TK_TSDB_PAGESIZE 81
#define TK_PRECISION 82
#define TK_REPLICA 83
#define TK_VGROUPS 84
#define TK_SINGLE_STABLE 85
#define TK_RETENTIONS 86
#define TK_SCHEMALESS 87
#define TK_WAL_LEVEL 88
#define TK_WAL_FSYNC_PERIOD 89
#define TK_WAL_RETENTION_PERIOD 90
#define TK_WAL_RETENTION_SIZE 91
#define TK_WAL_ROLL_PERIOD 92
#define TK_WAL_SEGMENT_SIZE 93
#define TK_STT_TRIGGER 94
#define TK_TABLE_PREFIX 95
#define TK_TABLE_SUFFIX 96
#define TK_NK_COLON 97
#define TK_MAX_SPEED 98
#define TK_START 99
#define TK_TIMESTAMP 100
#define TK_END 101
#define TK_TABLE 102
@ -130,25 +130,25 @@
#define TK_NK_EQ 112
#define TK_USING 113
#define TK_TAGS 114
#define TK_BOOL 115
#define TK_TINYINT 116
#define TK_SMALLINT 117
#define TK_INT 118
#define TK_INTEGER 119
#define TK_BIGINT 120
#define TK_FLOAT 121
#define TK_DOUBLE 122
#define TK_BINARY 123
#define TK_NCHAR 124
#define TK_UNSIGNED 125
#define TK_JSON 126
#define TK_VARCHAR 127
#define TK_MEDIUMBLOB 128
#define TK_BLOB 129
#define TK_VARBINARY 130
#define TK_GEOMETRY 131
#define TK_DECIMAL 132
#define TK_COMMENT 133
#define TK_COMMENT 115
#define TK_BOOL 116
#define TK_TINYINT 117
#define TK_SMALLINT 118
#define TK_INT 119
#define TK_INTEGER 120
#define TK_BIGINT 121
#define TK_FLOAT 122
#define TK_DOUBLE 123
#define TK_BINARY 124
#define TK_NCHAR 125
#define TK_UNSIGNED 126
#define TK_JSON 127
#define TK_VARCHAR 128
#define TK_MEDIUMBLOB 129
#define TK_BLOB 130
#define TK_VARBINARY 131
#define TK_GEOMETRY 132
#define TK_DECIMAL 133
#define TK_MAX_DELAY 134
#define TK_WATERMARK 135
#define TK_ROLLUP 136
@ -98,6 +98,16 @@ typedef struct SMTbCursor {
  int8_t paused;
} SMTbCursor;

typedef struct SMCtbCursor {
  SMeta   *pMeta;
  void    *pCur;
  tb_uid_t suid;
  void    *pKey;
  void    *pVal;
  int      kLen;
  int      vLen;
} SMCtbCursor;

typedef struct SRowBuffPos {
  void* pRowBuff;
  void* pKey;
@ -278,13 +288,15 @@ typedef struct SStoreMeta {
  void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables,
                       int64_t* numOfNormalTables);  // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) &
                                                     // metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta);

  int64_t (*getNumOfRowsInMem)(void* pVnode);
  /**
  int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list);
  int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg);
  int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list);
  */
  SMCtbCursor* (*openCtbCursor)(void *pVnode, tb_uid_t uid, int lock);
  void         (*closeCtbCursor)(SMCtbCursor *pCtbCur, int lock);
  tb_uid_t     (*ctbCursorNext)(SMCtbCursor* pCur);
} SStoreMeta;

typedef struct SStoreMetaReader {
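The three new callbacks let query code walk a super table's child tables through the storage-API vtable instead of calling meta functions directly. Below is a minimal usage sketch, assuming a populated SStoreMeta *pApi, an open vnode handle and a valid super-table uid (pVnode and suid are placeholders), and assuming ctbCursorNext returns 0 once the cursor is exhausted; it is not code from the patch.

#include <inttypes.h>
#include <stdio.h>
// Assumes the TDengine storage headers defining SStoreMeta, SMCtbCursor and
// tb_uid_t are available; usage sketch only.
static void listChildTables(SStoreMeta *pApi, void *pVnode, tb_uid_t suid) {
  SMCtbCursor *pCur = pApi->openCtbCursor(pVnode, suid, 1 /* lock */);
  if (pCur == NULL) return;

  tb_uid_t uid;
  while ((uid = pApi->ctbCursorNext(pCur)) != 0) {  // 0 assumed to mean "no more child tables"
    printf("child table uid: %" PRIi64 "\n", (int64_t)uid);
  }

  pApi->closeCtbCursor(pCur, 1 /* release the lock taken above */);
}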
@ -157,6 +157,8 @@ typedef enum EFunctionType {
  FUNCTION_TYPE_AVG_MERGE,
  FUNCTION_TYPE_STDDEV_PARTIAL,
  FUNCTION_TYPE_STDDEV_MERGE,
  FUNCTION_TYPE_IRATE_PARTIAL,
  FUNCTION_TYPE_IRATE_MERGE,

  // geometry functions
  FUNCTION_TYPE_GEOM_FROM_TEXT = 4250,
@ -23,10 +23,11 @@ extern "C" {
#include "query.h"
#include "querynodes.h"

#define DESCRIBE_RESULT_COLS 4
#define DESCRIBE_RESULT_FIELD_LEN (TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_TYPE_LEN (20 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_NOTE_LEN (8 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_COLS 5
#define DESCRIBE_RESULT_FIELD_LEN (TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_TYPE_LEN (20 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_NOTE_LEN (8 + VARSTR_HEADER_SIZE)
#define DESCRIBE_RESULT_COL_COMMENT_LEN (TSDB_COL_COMMENT_LEN)

#define SHOW_CREATE_DB_RESULT_COLS 2
#define SHOW_CREATE_DB_RESULT_FIELD1_LEN (TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE)
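The describe result grows from four to five columns here, with the new define sizing what appears to be a per-column comment field. A hedged sketch of the resulting layout; the first four names match the existing describe output, while the fifth is only an inference from DESCRIBE_RESULT_COL_COMMENT_LEN and is not stated in the patch.

// Assumed layout of the five describe-result columns after this change;
// "comment" is an assumption, not taken verbatim from the diff.
static const char *kDescribeResultCols[5] = {"field", "type", "length", "note", "comment"};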
@ -155,7 +156,7 @@ typedef struct SColumnDefNode {
  ENodeType type;
  char      colName[TSDB_COL_NAME_LEN];
  SDataType dataType;
  char      comments[TSDB_TB_COMMENT_LEN];
  char      comments[TSDB_COL_COMMENT_LEN];
  bool      sma;
} SColumnDefNode;
@ -214,6 +215,7 @@ typedef struct SAlterTableStmt {
  char           newColName[TSDB_COL_NAME_LEN];
  STableOptions* pOptions;
  SDataType      dataType;
  char           colComment[TSDB_COL_COMMENT_LEN];
  SValueNode*    pVal;
} SAlterTableStmt;
@ -169,7 +169,7 @@ typedef enum ENodeType {
|
|||
QUERY_NODE_REVOKE_STMT,
|
||||
QUERY_NODE_SHOW_DNODES_STMT,
|
||||
QUERY_NODE_SHOW_MNODES_STMT,
|
||||
QUERY_NODE_SHOW_MODULES_STMT,
|
||||
// QUERY_NODE_SHOW_MODULES_STMT,
|
||||
QUERY_NODE_SHOW_QNODES_STMT,
|
||||
QUERY_NODE_SHOW_SNODES_STMT,
|
||||
QUERY_NODE_SHOW_BNODES_STMT,
|
||||
|
|
|
@ -107,6 +107,7 @@ typedef struct SScanLogicNode {
|
|||
bool sortPrimaryKey;
|
||||
bool igLastNull;
|
||||
bool groupOrderScan;
|
||||
bool onlyMetaCtbIdx; // for tag scan with no tbname
|
||||
} SScanLogicNode;
|
||||
|
||||
typedef struct SJoinLogicNode {
|
||||
|
@ -334,7 +335,11 @@ typedef struct SScanPhysiNode {
|
|||
bool groupOrderScan;
|
||||
} SScanPhysiNode;
|
||||
|
||||
typedef SScanPhysiNode STagScanPhysiNode;
|
||||
typedef struct STagScanPhysiNode {
|
||||
SScanPhysiNode scan;
|
||||
bool onlyMetaCtbIdx; //no tbname, tag index not used.
|
||||
} STagScanPhysiNode;
|
||||
|
||||
typedef SScanPhysiNode SBlockDistScanPhysiNode;
|
||||
|
||||
typedef struct SLastRowScanPhysiNode {
|
||||
|
@ -603,6 +608,8 @@ typedef struct SSubplan {
|
|||
SNode* pTagCond;
|
||||
SNode* pTagIndexCond;
|
||||
bool showRewrite;
|
||||
int32_t rowsThreshold;
|
||||
bool dynamicRowThreshold;
|
||||
} SSubplan;
|
||||
|
||||
typedef enum EExplainMode { EXPLAIN_MODE_DISABLE = 1, EXPLAIN_MODE_STATIC, EXPLAIN_MODE_ANALYZE } EExplainMode;
|
||||
|
|
|
@ -122,6 +122,7 @@ typedef struct {
|
|||
int8_t type;
|
||||
|
||||
int32_t srcVgId;
|
||||
int32_t srcTaskId;
|
||||
int32_t childId;
|
||||
int64_t sourceVer;
|
||||
int64_t reqId;
|
||||
|
@ -251,6 +252,7 @@ typedef struct SStreamChildEpInfo {
|
|||
int32_t nodeId;
|
||||
int32_t childId;
|
||||
int32_t taskId;
|
||||
int8_t dataAllowed;
|
||||
SEpSet epSet;
|
||||
} SStreamChildEpInfo;
|
||||
|
||||
|
@ -272,6 +274,7 @@ typedef struct SStreamStatus {
|
|||
int8_t schedStatus;
|
||||
int8_t keepTaskStatus;
|
||||
bool transferState;
|
||||
bool appendTranstateBlock; // has append the transfer state data block already, todo: remove it
|
||||
int8_t timerActive; // timer is active
|
||||
int8_t pauseAllowed; // allowed task status to be set to be paused
|
||||
} SStreamStatus;
|
||||
|
@ -399,8 +402,9 @@ typedef struct {
|
|||
|
||||
typedef struct {
|
||||
int64_t streamId;
|
||||
int32_t type;
|
||||
int32_t taskId;
|
||||
int32_t dataSrcVgId;
|
||||
int32_t srcVgId;
|
||||
int32_t upstreamTaskId;
|
||||
int32_t upstreamChildId;
|
||||
int32_t upstreamNodeId;
|
||||
|
@ -570,8 +574,6 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
|
|||
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
|
||||
void tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq);
|
||||
|
||||
int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTask* pTask, int32_t vgId, int32_t numOfBlocks,
|
||||
int64_t dstTaskId);
|
||||
void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
|
||||
|
||||
int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
|
||||
|
@ -579,6 +581,8 @@ int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
|
|||
int32_t streamProcessRunReq(SStreamTask* pTask);
|
||||
int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec);
|
||||
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code);
|
||||
void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
|
||||
void streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
|
||||
|
||||
int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg);
|
||||
|
||||
|
@ -589,7 +593,6 @@ int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock
|
|||
bool streamTaskShouldStop(const SStreamStatus* pStatus);
|
||||
bool streamTaskShouldPause(const SStreamStatus* pStatus);
|
||||
bool streamTaskIsIdle(const SStreamTask* pTask);
|
||||
int32_t streamTaskEndScanWAL(SStreamTask* pTask);
|
||||
|
||||
SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId);
|
||||
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize);
|
||||
|
@ -626,7 +629,7 @@ int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* p
|
|||
int32_t streamSourceScanHistoryData(SStreamTask* pTask);
|
||||
int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);
|
||||
|
||||
int32_t streamDispatchTransferStateMsg(SStreamTask* pTask);
|
||||
int32_t appendTranstateIntoInputQ(SStreamTask* pTask);
|
||||
|
||||
// agg level
|
||||
int32_t streamTaskScanHistoryPrepare(SStreamTask* pTask);
|
||||
|
|
|
@ -76,7 +76,7 @@ int32_t taosUnLockFile(TdFilePtr pFile);

int32_t taosUmaskFile(int32_t maskVal);

int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime);
int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime);
int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno);
int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime);
bool    taosCheckExistFile(const char *pathname);
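taosStatFile gains a fourth out-parameter for the file's access time; the call sites updated later in this diff simply pass NULL for fields they do not need. A minimal sketch of a call against the new signature, assuming the osFile.h declaration is available; the path is a placeholder.

#include <inttypes.h>
#include <stdio.h>
// #include "osFile.h"  // TDengine OS layer; declares taosStatFile()

// Sketch: fetch size and mtime, skip atime by passing NULL, mirroring the
// updated callers (e.g. mmReadFile, dmReadEps) later in this diff.
static void printFileInfo(const char *path) {
  int64_t size = 0;
  int32_t mtime = 0;
  if (taosStatFile(path, &size, &mtime, NULL) < 0) {
    printf("file %s not exist\n", path);
    return;
  }
  printf("file %s: size=%" PRId64 " mtime=%d\n", path, size, mtime);
}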
@ -32,6 +32,8 @@ void taosSeedRand(uint32_t seed);
uint32_t taosRand(void);
uint32_t taosRandR(uint32_t* pSeed);
void     taosRandStr(char* str, int32_t size);
void     taosRandStr2(char* str, int32_t size);

uint32_t taosSafeRand(void);

#ifdef __cplusplus
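Two random helpers are added: a reentrant taosRandR that advances a caller-owned seed, and taosRandStr2, which mndCreateStb later uses to build a random index-name suffix. A hedged usage sketch; the seed value and buffer size are illustrative, and the assumption that taosRandStr2 does not NUL-terminate is mine.

// Sketch: derive a short random suffix the way mndCreateStb does further
// below (taosRandStr2(randStr, tListLen(randStr) - 1)). Assumes osRand.h.
static void makeRandomSuffix(char *buf, int32_t bufLen) {
  uint32_t seed = 20230801u;        // caller-owned seed, value is illustrative
  (void)taosRandR(&seed);           // reentrant draw; updates seed in place
  taosRandStr2(buf, bufLen - 1);    // fill bufLen-1 random characters
  buf[bufLen - 1] = '\0';           // terminate explicitly (assumption)
}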
@ -191,6 +191,7 @@ int32_t* taosGetErrno();
|
|||
// #define TSDB_CODE_MND_FAILED_TO_CREATE_DIR TAOS_DEF_ERROR_CODE(0, 0x0313) // 2.x
|
||||
// #define TSDB_CODE_MND_FAILED_TO_INIT_STEP TAOS_DEF_ERROR_CODE(0, 0x0314) // 2.x
|
||||
#define TSDB_CODE_MND_USER_DISABLED TAOS_DEF_ERROR_CODE(0, 0x0315)
|
||||
#define TSDB_CODE_MND_INVALID_PLATFORM TAOS_DEF_ERROR_CODE(0, 0x0316)
|
||||
|
||||
// mnode-sdb
|
||||
#define TSDB_CODE_SDB_OBJ_ALREADY_THERE TAOS_DEF_ERROR_CODE(0, 0x0320) // internal
|
||||
|
|
|
@ -230,6 +230,7 @@ typedef enum ELogicConditionType {

#define TSDB_APP_NAME_LEN   TSDB_UNI_LEN
#define TSDB_TB_COMMENT_LEN 1025
#define TSDB_COL_COMMENT_LEN 1025

#define TSDB_QUERY_ID_LEN   26
#define TSDB_TRANS_OPER_LEN 16
@ -16,7 +16,7 @@ target_include_directories(
|
|||
target_link_libraries(
|
||||
taos
|
||||
INTERFACE api
|
||||
PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom
|
||||
PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom geometry
|
||||
)
|
||||
|
||||
if(TD_DARWIN_ARM64)
|
||||
|
@ -57,7 +57,7 @@ target_include_directories(
|
|||
target_link_libraries(
|
||||
taos_static
|
||||
INTERFACE api
|
||||
PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom
|
||||
PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom geometry
|
||||
)
|
||||
|
||||
if(${BUILD_TEST})
|
||||
|
|
|
@ -33,6 +33,7 @@ extern "C" {
|
|||
#include "ttime.h"
|
||||
#include "ttypes.h"
|
||||
#include "cJSON.h"
|
||||
#include "geosWrapper.h"
|
||||
|
||||
#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
|
||||
# define expect(expr,value) (__builtin_expect ((expr),(value)) )
|
||||
|
@ -192,7 +193,7 @@ typedef struct {
|
|||
//
|
||||
SArray *preLineTagKV;
|
||||
SArray *maxTagKVs;
|
||||
SArray *masColKVs;
|
||||
SArray *maxColKVs;
|
||||
|
||||
SSmlLineInfo preLine;
|
||||
STableMeta *currSTableMeta;
|
||||
|
|
|
@ -1073,6 +1073,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
|
|||
return 0;
|
||||
|
||||
end:
|
||||
taosHashCancelIterate(info->superTables, tmp);
|
||||
taosHashCleanup(hashTmp);
|
||||
taosMemoryFreeClear(pTableMeta);
|
||||
catalogRefreshTableMeta(info->pCatalog, &conn, &pName, 1);
|
||||
|
@ -1191,6 +1192,7 @@ void freeSSmlKv(void *data) {
|
|||
SSmlKv *kv = (SSmlKv *)data;
|
||||
if (kv->keyEscaped) taosMemoryFree((void *)(kv->key));
|
||||
if (kv->valueEscaped) taosMemoryFree((void *)(kv->value));
|
||||
if (kv->type == TSDB_DATA_TYPE_GEOMETRY) geosFreeBuffer((void *)(kv->value));
|
||||
}
|
||||
|
||||
void smlDestroyInfo(SSmlHandle *info) {
|
||||
|
@ -1433,6 +1435,7 @@ static int32_t smlInsertData(SSmlHandle *info) {
|
|||
code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE);
|
||||
if(code != TSDB_CODE_SUCCESS){
|
||||
taosMemoryFree(measure);
|
||||
taosHashCancelIterate(info->childTables, oneTable);
|
||||
return code;
|
||||
}
|
||||
|
||||
|
@ -1441,6 +1444,7 @@ static int32_t smlInsertData(SSmlHandle *info) {
|
|||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("SML:0x%" PRIx64 " catalogGetTableHashVgroup failed. table name: %s", info->id, tableData->childTableName);
|
||||
taosMemoryFree(measure);
|
||||
taosHashCancelIterate(info->childTables, oneTable);
|
||||
return code;
|
||||
}
|
||||
taosHashPut(info->pVgHash, (const char *)&vg.vgId, sizeof(vg.vgId), (char *)&vg, sizeof(vg));
|
||||
|
@ -1450,6 +1454,7 @@ static int32_t smlInsertData(SSmlHandle *info) {
|
|||
if (unlikely(NULL == pMeta || NULL == (*pMeta)->tableMeta)) {
|
||||
uError("SML:0x%" PRIx64 " NULL == pMeta. table name: %s", info->id, tableData->childTableName);
|
||||
taosMemoryFree(measure);
|
||||
taosHashCancelIterate(info->childTables, oneTable);
|
||||
return TSDB_CODE_SML_INTERNAL_ERROR;
|
||||
}
|
||||
|
||||
|
@ -1465,6 +1470,7 @@ static int32_t smlInsertData(SSmlHandle *info) {
|
|||
taosMemoryFree(measure);
|
||||
if (code != TSDB_CODE_SUCCESS) {
|
||||
uError("SML:0x%" PRIx64 " smlBindData failed", info->id);
|
||||
taosHashCancelIterate(info->childTables, oneTable);
|
||||
return code;
|
||||
}
|
||||
oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, oneTable);
|
||||
|
|
|
@ -102,6 +102,30 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) {
    return TSDB_CODE_TSC_INVALID_VALUE;
  }

  if (pVal->value[0] == 'g' || pVal->value[0] == 'G') {  // geometry
    if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= sizeof("POINT")+3) {
      int32_t code = initCtxGeomFromText();
      if (code != TSDB_CODE_SUCCESS) {
        return code;
      }
      char* tmp = taosMemoryCalloc(pVal->length, 1);
      memcpy(tmp, pVal->value + 2, pVal->length - 3);
      code = doGeomFromText(tmp, (unsigned char **)&pVal->value, &pVal->length);
      taosMemoryFree(tmp);
      if (code != TSDB_CODE_SUCCESS) {
        return code;
      }

      pVal->type = TSDB_DATA_TYPE_GEOMETRY;
      if (pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
        geosFreeBuffer((void*)(pVal->value));
        return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
      }
      return TSDB_CODE_SUCCESS;
    }
    return TSDB_CODE_TSC_INVALID_VALUE;
  }

  if (pVal->value[0] == 't' || pVal->value[0] == 'T') {
    if (pVal->length == 1 ||
        (pVal->length == 4 && (pVal->value[1] == 'r' || pVal->value[1] == 'R') &&
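With this branch, a schemaless field value that starts with g or G and wraps a double-quoted WKT literal is converted by doGeomFromText and stored as TSDB_DATA_TYPE_GEOMETRY, subject to the binary length cap. A hypothetical line-protocol sample in the accepted shape; the measurement, tag and field names are made up.

// Hypothetical InfluxDB line-protocol input for the branch above: the value
// g"POINT(1.0 1.0)" is parsed as WKT and re-encoded as a GEOMETRY column.
static const char *kSampleSmlLine =
    "geo_demo,region=sh shape=g\"POINT(1.0 1.0)\",val=3i64 1690000000000000000";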
@ -390,14 +414,14 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
|
|||
SSmlKv kv = {.key = tag->name, .keyLen = strlen(tag->name), .type = tag->type};
|
||||
if (tag->type == TSDB_DATA_TYPE_NCHAR) {
|
||||
kv.length = (tag->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
|
||||
} else if (tag->type == TSDB_DATA_TYPE_BINARY) {
|
||||
} else if (tag->type == TSDB_DATA_TYPE_BINARY || tag->type == TSDB_DATA_TYPE_GEOMETRY) {
|
||||
kv.length = tag->bytes - VARSTR_HEADER_SIZE;
|
||||
}
|
||||
taosArrayPush((*tmp)->cols, &kv);
|
||||
}
|
||||
}
|
||||
info->currSTableMeta = (*tmp)->tableMeta;
|
||||
info->masColKVs = (*tmp)->cols;
|
||||
info->maxColKVs = (*tmp)->cols;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -512,13 +536,13 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
|
|||
freeSSmlKv(&kv);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
if (cnt >= taosArrayGetSize(info->masColKVs)) {
|
||||
if (cnt >= taosArrayGetSize(info->maxColKVs)) {
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
freeSSmlKv(&kv);
|
||||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(info->masColKVs, cnt);
|
||||
SSmlKv *maxKV = (SSmlKv *)taosArrayGet(info->maxColKVs, cnt);
|
||||
if (kv.type != maxKV->type) {
|
||||
info->dataFormat = false;
|
||||
info->reRun = true;
|
||||
|
@ -663,14 +687,15 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine
|
|||
if (info->dataFormat) {
|
||||
uDebug("SML:0x%" PRIx64 " smlParseInfluxString format true, ts:%" PRId64, info->id, ts);
|
||||
ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 0);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return ret;
|
||||
}
|
||||
ret = smlBuildRow(info->currTableDataCtx);
|
||||
if (ret != TSDB_CODE_SUCCESS) {
|
||||
return ret;
|
||||
if (ret == TSDB_CODE_SUCCESS) {
|
||||
ret = smlBuildRow(info->currTableDataCtx);
|
||||
}
|
||||
|
||||
clearColValArray(info->currTableDataCtx->pValues);
|
||||
if (unlikely(ret != TSDB_CODE_SUCCESS)) {
|
||||
smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL);
|
||||
return ret;
|
||||
}
|
||||
} else {
|
||||
uDebug("SML:0x%" PRIx64 " smlParseInfluxString format false, ts:%" PRId64, info->id, ts);
|
||||
taosArraySet(elements->colArray, 0, &kv);
|
||||
|
|
|
@ -20,7 +20,7 @@ TARGET_LINK_LIBRARIES(
|
|||
ADD_EXECUTABLE(smlTest smlTest.cpp)
|
||||
TARGET_LINK_LIBRARIES(
|
||||
smlTest
|
||||
PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom
|
||||
PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom geometry
|
||||
)
|
||||
|
||||
TARGET_INCLUDE_DIRECTORIES(
|
||||
|
|
|
@ -16,6 +16,14 @@ ENDIF ()
IF (TD_STORAGE)
  ADD_DEFINITIONS(-D_STORAGE)
  TARGET_LINK_LIBRARIES(common PRIVATE storage)

  IF(${TD_LINUX})
    IF(${BUILD_WITH_COS})
      add_definitions(-DUSE_COS)
    ENDIF(${BUILD_WITH_COS})

  ENDIF(${TD_LINUX})

ENDIF ()

target_include_directories(
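On Linux builds with BUILD_WITH_COS enabled, code compiled under this target sees the USE_COS define. A minimal C sketch of how a source file can branch on that flag; s3Init and s3CleanUp come from the new COS header added near the end of this diff, and the wrapper names here are illustrative, not from the patch.

// Sketch: compile-time gate on the USE_COS define added by the CMake change
// above. On non-COS builds these wrappers collapse to no-ops.
static int32_t initObjectStore(void) {
#ifdef USE_COS
  return s3Init();   // bring up the Tencent COS C SDK once per process
#else
  return 0;          // object storage disabled at build time
#endif
}

static void cleanupObjectStore(void) {
#ifdef USE_COS
  s3CleanUp();
#endif
}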
@ -314,7 +314,7 @@ static const SSysDbTableSchema userUserPrivilegesSchema[] = {
|
|||
static const SSysTableMeta infosMeta[] = {
|
||||
{TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true},
|
||||
{TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true},
|
||||
{TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true},
|
||||
// {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true},
|
||||
{TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema), true},
|
||||
{TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema), true},
|
||||
{TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema), true},
|
||||
|
|
|
@ -26,7 +26,7 @@ int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRo
|
|||
if (pColumnInfoData->reassigned) {
|
||||
int32_t totalSize = 0;
|
||||
for (int32_t row = 0; row < numOfRows; ++row) {
|
||||
char* pColData = pColumnInfoData->pData + pColumnInfoData->varmeta.offset[row];
|
||||
char* pColData = pColumnInfoData->pData + pColumnInfoData->varmeta.offset[row];
|
||||
int32_t colSize = 0;
|
||||
if (pColumnInfoData->info.type == TSDB_DATA_TYPE_JSON) {
|
||||
colSize = getJsonValueLen(pColData);
|
||||
|
@ -142,7 +142,8 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const
|
|||
return 0;
|
||||
}
|
||||
|
||||
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData) {
|
||||
int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx,
|
||||
const char* pData) {
|
||||
int32_t type = pColumnInfoData->info.type;
|
||||
if (IS_VAR_DATA_TYPE(type)) {
|
||||
int32_t dataLen = 0;
|
||||
|
@ -164,7 +165,6 @@ int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx,
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) {
|
||||
if (!IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
|
||||
return TSDB_CODE_SUCCESS;
|
||||
|
@ -188,16 +188,17 @@ static int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize)
|
|||
}
|
||||
|
||||
static int32_t doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t currentRow, const char* pData,
|
||||
int32_t itemLen, int32_t numOfRows, bool trimValue) {
|
||||
int32_t itemLen, int32_t numOfRows, bool trimValue) {
|
||||
if (pColumnInfoData->info.bytes < itemLen) {
|
||||
uWarn("column/tag actual data len %d is bigger than schema len %d, trim it:%d", itemLen, pColumnInfoData->info.bytes, trimValue);
|
||||
uWarn("column/tag actual data len %d is bigger than schema len %d, trim it:%d", itemLen,
|
||||
pColumnInfoData->info.bytes, trimValue);
|
||||
if (trimValue) {
|
||||
itemLen = pColumnInfoData->info.bytes;
|
||||
} else {
|
||||
return TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
size_t start = 1;
|
||||
|
||||
// the first item
|
||||
|
@ -230,8 +231,8 @@ static int32_t doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t cur
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
|
||||
uint32_t numOfRows, bool trimValue) {
|
||||
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows,
|
||||
bool trimValue) {
|
||||
int32_t len = pColumnInfoData->info.bytes;
|
||||
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
|
||||
len = varDataTLen(pData);
|
||||
|
@ -262,7 +263,7 @@ static void doBitmapMerge(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, c
|
|||
|
||||
uint8_t* p = (uint8_t*)pSource->nullbitmap;
|
||||
pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] &= (0B11111111 << shiftBits); // clear remind bits
|
||||
pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] |= (p[0] >> remindBits); // copy remind bits
|
||||
pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] |= (p[0] >> remindBits); // copy remind bits
|
||||
|
||||
if (BitmapLen(numOfRow1) == BitmapLen(total)) {
|
||||
return;
|
||||
|
@ -350,7 +351,7 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int
|
|||
|
||||
pColumnInfoData->pData = tmp;
|
||||
if (BitmapLen(numOfRow1) < BitmapLen(finalNumOfRows)) {
|
||||
char* btmp = taosMemoryRealloc(pColumnInfoData->nullbitmap, BitmapLen(finalNumOfRows));
|
||||
char* btmp = taosMemoryRealloc(pColumnInfoData->nullbitmap, BitmapLen(finalNumOfRows));
|
||||
if (btmp == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
|
@ -622,7 +623,7 @@ int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock) {
|
|||
|
||||
if (pCol->reassigned && IS_VAR_DATA_TYPE(pCol->info.type)) {
|
||||
for (int32_t row = 0; row < numOfRows; ++row) {
|
||||
char* pColData = pCol->pData + pCol->varmeta.offset[row];
|
||||
char* pColData = pCol->pData + pCol->varmeta.offset[row];
|
||||
int32_t colSize = 0;
|
||||
if (pCol->info.type == TSDB_DATA_TYPE_JSON) {
|
||||
colSize = getJsonValueLen(pColData);
|
||||
|
@ -698,8 +699,7 @@ int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) {
|
|||
return TSDB_CODE_SUCCESS;
|
||||
}
|
||||
|
||||
static bool colDataIsNNull(const SColumnInfoData* pColumnInfoData, int32_t startIndex,
|
||||
uint32_t nRows) {
|
||||
static bool colDataIsNNull(const SColumnInfoData* pColumnInfoData, int32_t startIndex, uint32_t nRows) {
|
||||
if (!pColumnInfoData->hasNull) {
|
||||
return false;
|
||||
}
|
||||
|
@ -880,7 +880,6 @@ int32_t dataBlockCompar(const void* p1, const void* p2, const void* param) {
|
|||
}
|
||||
|
||||
static int32_t blockDataAssign(SColumnInfoData* pCols, const SSDataBlock* pDataBlock, const int32_t* index) {
|
||||
|
||||
size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock);
|
||||
for (int32_t i = 0; i < numOfCols; ++i) {
|
||||
SColumnInfoData* pDst = &pCols[i];
|
||||
|
@ -1131,6 +1130,7 @@ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo*
|
|||
if (tmp == NULL) {
|
||||
return TSDB_CODE_OUT_OF_MEMORY;
|
||||
}
|
||||
// memset(tmp, 0, numOfRows * pColumn->info.bytes);
|
||||
|
||||
// copy back the existed data
|
||||
if (pColumn->pData != NULL) {
|
||||
|
@ -1474,8 +1474,8 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize, int
|
|||
int end = nRows;
|
||||
while (start <= end) {
|
||||
int mid = start + (end - start) / 2;
|
||||
//data size + var data type columns offset + fixed data type columns bitmap len
|
||||
int midSize = rowSize * mid + numVarCols * sizeof(int32_t) * mid + numFixCols * BitmapLen(mid);
|
||||
// data size + var data type columns offset + fixed data type columns bitmap len
|
||||
int midSize = rowSize * mid + numVarCols * sizeof(int32_t) * mid + numFixCols * BitmapLen(mid);
|
||||
if (midSize > payloadSize) {
|
||||
result = mid;
|
||||
end = mid - 1;
|
||||
|
@ -1669,7 +1669,7 @@ int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock) {
|
|||
|
||||
if (pColData->reassigned && IS_VAR_DATA_TYPE(pColData->info.type)) {
|
||||
for (int32_t row = 0; row < rows; ++row) {
|
||||
char* pData = pColData->pData + pColData->varmeta.offset[row];
|
||||
char* pData = pColData->pData + pColData->varmeta.offset[row];
|
||||
int32_t colSize = 0;
|
||||
if (pColData->info.type == TSDB_DATA_TYPE_JSON) {
|
||||
colSize = getJsonValueLen(pData);
|
||||
|
@ -1771,8 +1771,8 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
|
|||
}
|
||||
|
||||
// for debug
|
||||
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) {
|
||||
int32_t size = 2048*1024;
|
||||
char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf, const char* taskIdStr) {
|
||||
int32_t size = 2048 * 1024;
|
||||
*pDataBuf = taosMemoryCalloc(size, 1);
|
||||
char* dumpBuf = *pDataBuf;
|
||||
char pBuf[128] = {0};
|
||||
|
@ -1780,9 +1780,9 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
|
|||
int32_t rows = pDataBlock->info.rows;
|
||||
int32_t len = 0;
|
||||
len += snprintf(dumpBuf + len, size - len,
|
||||
"===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%" PRId64
|
||||
"%s===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%" PRId64
|
||||
"|rows:%" PRId64 "|version:%" PRIu64 "|cal start:%" PRIu64 "|cal end:%" PRIu64 "|tbl:%s\n",
|
||||
flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.id.groupId,
|
||||
taskIdStr, flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.id.groupId,
|
||||
pDataBlock->info.id.uid, pDataBlock->info.rows, pDataBlock->info.version,
|
||||
pDataBlock->info.calWin.skey, pDataBlock->info.calWin.ekey, pDataBlock->info.parTbName);
|
||||
if (len >= size - 1) return dumpBuf;
|
||||
|
@ -2156,21 +2156,21 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
|
|||
data += metaSize;
|
||||
dataLen += metaSize;
|
||||
|
||||
if (pColRes->reassigned && IS_VAR_DATA_TYPE(pColRes->info.type)) {
|
||||
colSizes[col] = 0;
|
||||
for (int32_t row = 0; row < numOfRows; ++row) {
|
||||
char* pColData = pColRes->pData + pColRes->varmeta.offset[row];
|
||||
int32_t colSize = 0;
|
||||
if (pColRes->info.type == TSDB_DATA_TYPE_JSON) {
|
||||
colSize = getJsonValueLen(pColData);
|
||||
} else {
|
||||
colSize = varDataTLen(pColData);
|
||||
}
|
||||
colSizes[col] += colSize;
|
||||
dataLen += colSize;
|
||||
memmove(data, pColData, colSize);
|
||||
data += colSize;
|
||||
if (pColRes->reassigned && IS_VAR_DATA_TYPE(pColRes->info.type)) {
|
||||
colSizes[col] = 0;
|
||||
for (int32_t row = 0; row < numOfRows; ++row) {
|
||||
char* pColData = pColRes->pData + pColRes->varmeta.offset[row];
|
||||
int32_t colSize = 0;
|
||||
if (pColRes->info.type == TSDB_DATA_TYPE_JSON) {
|
||||
colSize = getJsonValueLen(pColData);
|
||||
} else {
|
||||
colSize = varDataTLen(pColData);
|
||||
}
|
||||
colSizes[col] += colSize;
|
||||
dataLen += colSize;
|
||||
memmove(data, pColData, colSize);
|
||||
data += colSize;
|
||||
}
|
||||
} else {
|
||||
colSizes[col] = colDataGetLength(pColRes, numOfRows);
|
||||
dataLen += colSizes[col];
|
||||
|
@ -2181,7 +2181,8 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
|
|||
}
|
||||
|
||||
colSizes[col] = htonl(colSizes[col]);
|
||||
// uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type, htonl(colSizes[col]), colSizes[col]);
|
||||
// uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type,
|
||||
// htonl(colSizes[col]), colSizes[col]);
|
||||
}
|
||||
|
||||
*actualLen = dataLen;
|
||||
|
@ -2283,7 +2284,7 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
|
|||
}
|
||||
|
||||
void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList) {
|
||||
// int32_t totalRows = pBlock->info.rows;
|
||||
// int32_t totalRows = pBlock->info.rows;
|
||||
int32_t bmLen = BitmapLen(totalRows);
|
||||
char* pBitmap = NULL;
|
||||
int32_t maxRows = 0;
|
||||
|
@ -2310,8 +2311,9 @@ void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList
|
|||
if (colDataIsNull_var(pDst, j)) {
|
||||
colDataSetNull_var(pDst, numOfRows);
|
||||
} else {
|
||||
// fix address sanitizer error. p1 may point to memory that will change during realloc of colDataSetVal, first copy it to p2
|
||||
char* p1 = colDataGetVarData(pDst, j);
|
||||
// fix address sanitizer error. p1 may point to memory that will change during realloc of colDataSetVal, first
|
||||
// copy it to p2
|
||||
char* p1 = colDataGetVarData(pDst, j);
|
||||
int32_t len = 0;
|
||||
if (pDst->info.type == TSDB_DATA_TYPE_JSON) {
|
||||
len = getJsonValueLen(p1);
|
||||
|
|
|
@ -216,7 +216,11 @@ uint32_t tsCurRange = 100; // range
|
|||
char tsCompressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
|
||||
|
||||
// udf
|
||||
bool tsStartUdfd = true;
|
||||
#ifdef WINDOWS
|
||||
bool tsStartUdfd = false;
|
||||
#else
|
||||
bool tsStartUdfd = true;
|
||||
#endif
|
||||
|
||||
// wal
|
||||
int64_t tsWalFsyncDataSizeLimit = (100 * 1024 * 1024L);
|
||||
|
@ -236,6 +240,15 @@ int64_t tsStreamBufferSize = 128 * 1024 * 1024;
int64_t tsCheckpointInterval = 3 * 60 * 60 * 1000;
bool    tsFilterScalarMode = false;
int32_t tsKeepTimeOffset = 0;  // latency of data migration
int     tsResolveFQDNRetryTime = 100;  // seconds

char   tsS3Endpoint[TSDB_FQDN_LEN] = "<endpoint>";
char   tsS3AccessKey[TSDB_FQDN_LEN] = "<accesskey>";
char   tsS3AccessKeyId[TSDB_FQDN_LEN] = "<accesskeyid>";
char   tsS3AccessKeySecret[TSDB_FQDN_LEN] = "<accesskeysecrect>";
char   tsS3BucketName[TSDB_FQDN_LEN] = "<bucketname>";
char   tsS3AppId[TSDB_FQDN_LEN] = "<appid>";
int8_t tsS3Enabled = false;

#ifndef _STORAGE
int32_t taosSetTfsCfg(SConfig *pCfg) {
@ -258,7 +271,43 @@ int32_t taosSetTfsCfg(SConfig *pCfg) {
int32_t taosSetTfsCfg(SConfig *pCfg);
#endif

struct SConfig *taosGetCfg() { return tsCfg; }
int32_t taosSetS3Cfg(SConfig *pCfg) {
  tstrncpy(tsS3AccessKey, cfgGetItem(pCfg, "s3Accesskey")->str, TSDB_FQDN_LEN);
  if (tsS3AccessKey[0] == '<') {
    return 0;
  }
  char *colon = strchr(tsS3AccessKey, ':');
  if (!colon) {
    uError("invalid access key:%s", tsS3AccessKey);
    return -1;
  }
  *colon = '\0';
  tstrncpy(tsS3AccessKeyId, tsS3AccessKey, TSDB_FQDN_LEN);
  tstrncpy(tsS3AccessKeySecret, colon + 1, TSDB_FQDN_LEN);
  tstrncpy(tsS3Endpoint, cfgGetItem(pCfg, "s3Endpoint")->str, TSDB_FQDN_LEN);
  tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN);
  char *cos = strstr(tsS3Endpoint, "cos.");
  if (cos) {
    char *appid = strrchr(tsS3BucketName, '-');
    if (!appid) {
      uError("failed to locate appid in bucket:%s", tsS3BucketName);
      return -1;
    } else {
      tstrncpy(tsS3AppId, appid + 1, TSDB_FQDN_LEN);
    }
  }
  if (tsS3BucketName[0] != '<' && tsDiskCfgNum > 1) {
#ifdef USE_COS
    tsS3Enabled = true;
#endif
  }

  return 0;
}

struct SConfig *taosGetCfg() {
  return tsCfg;
}

static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile,
                           char *apolloUrl) {
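taosSetS3Cfg expects s3Accesskey to carry the key id and secret joined by a colon and, for a COS endpoint, derives the app id from the trailing -<appid> segment of the bucket name. A standalone sketch of those two parsing rules; the literal values are placeholders, not a real account.

#include <stdio.h>
#include <string.h>

// Mirrors the colon-split and appid lookup performed by taosSetS3Cfg() above.
int main(void) {
  char accessKey[128] = "AKIDEXAMPLE:secretEXAMPLE";  // "<id>:<secret>" form
  char bucket[128]    = "td-backup-1250000000";       // "<name>-<appid>" form

  char *colon = strchr(accessKey, ':');
  if (colon == NULL) {
    printf("invalid access key:%s\n", accessKey);
    return -1;
  }
  *colon = '\0';
  printf("key id:%s secret:%s\n", accessKey, colon + 1);

  char *appid = strrchr(bucket, '-');
  if (appid == NULL) {
    printf("failed to locate appid in bucket:%s\n", bucket);
    return -1;
  }
  printf("appid:%s\n", appid + 1);
  return 0;
}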
@ -580,6 +629,11 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
  if (cfgAddInt32(pCfg, "keepTimeOffset", tsKeepTimeOffset, 0, 23, CFG_SCOPE_SERVER) != 0) return -1;
  if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, CFG_SCOPE_SERVER) != 0) return -1;
  if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, CFG_SCOPE_SERVER) != 0) return -1;
  if (cfgAddInt32(pCfg, "resolveFQDNRetryTime", tsResolveFQDNRetryTime, 1, 10240, 0) != 0) return -1;

  if (cfgAddString(pCfg, "s3Accesskey", tsS3AccessKey, CFG_SCOPE_SERVER) != 0) return -1;
  if (cfgAddString(pCfg, "s3Endpoint", tsS3Endpoint, CFG_SCOPE_SERVER) != 0) return -1;
  if (cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER) != 0) return -1;

  GRANT_CFG_ADD;
  return 0;
@ -979,6 +1033,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
|
|||
tsKeepTimeOffset = cfgGetItem(pCfg, "keepTimeOffset")->i32;
|
||||
tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32;
|
||||
tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32;
|
||||
tsResolveFQDNRetryTime = cfgGetItem(pCfg, "resolveFQDNRetryTime")->i32;
|
||||
|
||||
GRANT_CFG_GET;
|
||||
return 0;
|
||||
|
@ -1502,6 +1557,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile
|
|||
if (taosSetServerCfg(tsCfg)) return -1;
|
||||
if (taosSetReleaseCfg(tsCfg)) return -1;
|
||||
if (taosSetTfsCfg(tsCfg) != 0) return -1;
|
||||
if (taosSetS3Cfg(tsCfg) != 0) return -1;
|
||||
}
|
||||
taosSetSystemCfg(tsCfg);
|
||||
|
||||
|
|
|
@ -534,6 +534,7 @@ int32_t tSerializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pReq
|
|||
if (tEncodeI8(&encoder, pField->flags) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pField->bytes) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pField->name) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pField->comment) < 0) return -1;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < pReq->numOfTags; ++i) {
|
||||
|
@ -542,6 +543,7 @@ int32_t tSerializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pReq
|
|||
if (tEncodeI8(&encoder, pField->flags) < 0) return -1;
|
||||
if (tEncodeI32(&encoder, pField->bytes) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pField->name) < 0) return -1;
|
||||
if (tEncodeCStr(&encoder, pField->comment) < 0) return -1;
|
||||
}
|
||||
|
||||
for (int32_t i = 0; i < pReq->numOfFuncs; ++i) {
|
||||
|
@ -608,6 +610,7 @@ int32_t tDeserializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pR
|
|||
if (tDecodeI8(&decoder, &field.flags) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &field.bytes) < 0) return -1;
|
||||
if (tDecodeCStrTo(&decoder, field.name) < 0) return -1;
|
||||
if (tDecodeCStrTo(&decoder, field.comment) < 0) return -1;
|
||||
if (taosArrayPush(pReq->pColumns, &field) == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
|
@ -620,6 +623,7 @@ int32_t tDeserializeSMCreateStbReq(void *buf, int32_t bufLen, SMCreateStbReq *pR
|
|||
if (tDecodeI8(&decoder, &field.flags) < 0) return -1;
|
||||
if (tDecodeI32(&decoder, &field.bytes) < 0) return -1;
|
||||
if (tDecodeCStrTo(&decoder, field.name) < 0) return -1;
|
||||
if (tDecodeCStrTo(&decoder, field.comment) < 0) return -1;
|
||||
if (taosArrayPush(pReq->pTags, &field) == NULL) {
|
||||
terrno = TSDB_CODE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
|
@ -2323,7 +2327,7 @@ int32_t tDeserializeSTableCfgRsp(void *buf, int32_t bufLen, STableCfgRsp *pRsp)
|
|||
}
|
||||
|
||||
int32_t totalCols = pRsp->numOfTags + pRsp->numOfColumns;
|
||||
pRsp->pSchemas = taosMemoryMalloc(sizeof(SSchema) * totalCols);
|
||||
pRsp->pSchemas = taosMemoryCalloc(totalCols, sizeof(SSchema));
|
||||
if (pRsp->pSchemas == NULL) return -1;
|
||||
|
||||
for (int32_t i = 0; i < totalCols; ++i) {
|
||||
|
@ -3706,7 +3710,7 @@ static int32_t tDecodeSTableMetaRsp(SDecoder *pDecoder, STableMetaRsp *pRsp) {
|
|||
|
||||
int32_t totalCols = pRsp->numOfTags + pRsp->numOfColumns;
|
||||
if (totalCols > 0) {
|
||||
pRsp->pSchemas = taosMemoryMalloc(sizeof(SSchema) * totalCols);
|
||||
pRsp->pSchemas = taosMemoryCalloc(totalCols, sizeof(SSchema));
|
||||
if (pRsp->pSchemas == NULL) return -1;
|
||||
|
||||
for (int32_t i = 0; i < totalCols; ++i) {
|
||||
|
|
|
@ -46,7 +46,7 @@ static int32_t mmDecodeOption(SJson *pJson, SMnodeOpt *pOption) {
|
|||
if (code < 0) return -1;
|
||||
tjsonGetInt32ValueFromDouble(replica, "role", pOption->nodeRoles[i], code);
|
||||
if (code < 0) return -1;
|
||||
if(pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER){
|
||||
if (pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER) {
|
||||
pOption->numOfReplicas++;
|
||||
}
|
||||
}
|
||||
|
@ -65,7 +65,7 @@ int32_t mmReadFile(const char *path, SMnodeOpt *pOption) {
|
|||
char file[PATH_MAX] = {0};
|
||||
snprintf(file, sizeof(file), "%s%smnode.json", path, TD_DIRSEP);
|
||||
|
||||
if (taosStatFile(file, NULL, NULL) < 0) {
|
||||
if (taosStatFile(file, NULL, NULL, NULL) < 0) {
|
||||
dInfo("mnode file:%s not exist", file);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -97,7 +97,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t
|
|||
SWrapperCfg *pCfgs = NULL;
|
||||
snprintf(file, sizeof(file), "%s%svnodes.json", pMgmt->path, TD_DIRSEP);
|
||||
|
||||
if (taosStatFile(file, NULL, NULL) < 0) {
|
||||
if (taosStatFile(file, NULL, NULL, NULL) < 0) {
|
||||
dInfo("vnode file:%s not exist", file);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -742,7 +742,6 @@ SArray *vmGetMsgHandles() {
|
|||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_SCAN_HISTORY_FINISH, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_SCAN_HISTORY_FINISH_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TRANSFER_STATE, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECK, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECK_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
|
||||
if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
|
||||
|
|
|
@ -100,7 +100,7 @@ int32_t dmReadEps(SDnodeData *pData) {
|
|||
goto _OVER;
|
||||
}
|
||||
|
||||
if (taosStatFile(file, NULL, NULL) < 0) {
|
||||
if (taosStatFile(file, NULL, NULL, NULL) < 0) {
|
||||
dInfo("dnode file:%s not exist", file);
|
||||
code = 0;
|
||||
goto _OVER;
|
||||
|
@ -350,7 +350,7 @@ void dmRotateMnodeEpSet(SDnodeData *pData) {
|
|||
}
|
||||
|
||||
void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet) {
|
||||
if(!pData->validMnodeEps) return;
|
||||
if (!pData->validMnodeEps) return;
|
||||
dmGetMnodeEpSet(pData, pEpSet);
|
||||
dTrace("msg is redirected, handle:%p num:%d use:%d", pMsg->info.handle, pEpSet->numOfEps, pEpSet->inUse);
|
||||
for (int32_t i = 0; i < pEpSet->numOfEps; ++i) {
|
||||
|
@ -469,7 +469,7 @@ static int32_t dmReadDnodePairs(SDnodeData *pData) {
|
|||
char file[PATH_MAX] = {0};
|
||||
snprintf(file, sizeof(file), "%s%sdnode%sep.json", tsDataDir, TD_DIRSEP, TD_DIRSEP);
|
||||
|
||||
if (taosStatFile(file, NULL, NULL) < 0) {
|
||||
if (taosStatFile(file, NULL, NULL, NULL) < 0) {
|
||||
dDebug("dnode file:%s not exist", file);
|
||||
code = 0;
|
||||
goto _OVER;
|
||||
|
|
|
@ -38,7 +38,7 @@ int32_t dmReadFile(const char *path, const char *name, bool *pDeployed) {
|
|||
char file[PATH_MAX] = {0};
|
||||
snprintf(file, sizeof(file), "%s%s%s.json", path, TD_DIRSEP, name);
|
||||
|
||||
if (taosStatFile(file, NULL, NULL) < 0) {
|
||||
if (taosStatFile(file, NULL, NULL, NULL) < 0) {
|
||||
dInfo("file:%s not exist", file);
|
||||
code = 0;
|
||||
goto _OVER;
|
||||
|
|
|
@ -666,7 +666,12 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) {
|
|||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
#ifdef WINDOWS
|
||||
if (taosArrayGetSize(createReq.pRetensions) > 0) {
|
||||
terrno = TSDB_CODE_MND_INVALID_PLATFORM;
|
||||
goto _OVER;
|
||||
}
|
||||
#endif
|
||||
mInfo("db:%s, start to create, vgroups:%d", createReq.db, createReq.numOfVgroups);
|
||||
if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_DB, NULL) != 0) {
|
||||
goto _OVER;
|
||||
|
|
|
@ -359,7 +359,10 @@ static int32_t mndProcessCreateFuncReq(SRpcMsg *pReq) {
|
|||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
#ifdef WINDOWS
|
||||
terrno = TSDB_CODE_MND_INVALID_PLATFORM;
|
||||
goto _OVER;
|
||||
#endif
|
||||
mInfo("func:%s, start to create, size:%d", createReq.name, createReq.codeLen);
|
||||
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_FUNC) != 0) {
|
||||
goto _OVER;
|
||||
|
|
|
@ -79,9 +79,12 @@ int32_t mndInitIdx(SMnode *pMnode) {
  return sdbSetTable(pMnode->pSdb, table);
}

static int32_t mndFindSuperTableTagId(const SStbObj *pStb, const char *tagName) {
static int32_t mndFindSuperTableTagId(const SStbObj *pStb, const char *tagName, int8_t *hasIdx) {
  for (int32_t tag = 0; tag < pStb->numOfTags; tag++) {
    if (strcasecmp(pStb->pTags[tag].name, tagName) == 0) {
      if (IS_IDX_ON(&pStb->pTags[tag])) {
        *hasIdx = 1;
      }
      return tag;
    }
  }
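mndFindSuperTableTagId now reports, through the extra hasIdx out-parameter, whether the matched tag already carries an index, so callers can distinguish "tag missing" from "index already present". A sketch of the calling pattern used later in this file; the wrapper function is mine, and pStb/tagName are assumed valid.

// Sketch of the new calling convention: return value is the tag position
// (or < 0 if the tag does not exist), hasIdx says whether that tag is
// already flagged as indexed. Assumes the mnode headers for SStbObj/terrno.
static int32_t checkTagForIndex(const SStbObj *pStb, const char *tagName) {
  int8_t  hasIdx = 0;
  int32_t tag = mndFindSuperTableTagId(pStb, tagName, &hasIdx);
  if (tag < 0) {
    terrno = TSDB_CODE_MND_TAG_NOT_EXIST;              // tag name not found at all
    return -1;
  }
  if (hasIdx && tag != 0) {
    terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;    // non-first tag already indexed
    return -1;
  }
  return tag;   // tag position; an index may still be created on it
}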
@ -597,7 +600,8 @@ static int32_t mndSetUpdateIdxStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb
|
|||
pNew->updateTime = taosGetTimestampMs();
|
||||
pNew->lock = 0;
|
||||
|
||||
int32_t tag = mndFindSuperTableTagId(pOld, tagName);
|
||||
int8_t hasIdx = 0;
|
||||
int32_t tag = mndFindSuperTableTagId(pOld, tagName, &hasIdx);
|
||||
if (tag < 0) {
|
||||
terrno = TSDB_CODE_MND_TAG_NOT_EXIST;
|
||||
return -1;
|
||||
|
@ -612,14 +616,14 @@ static int32_t mndSetUpdateIdxStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb
|
|||
SSchema *pTag = pNew->pTags + tag;
|
||||
|
||||
if (on == 1) {
|
||||
if (IS_IDX_ON(pTag)) {
|
||||
if (hasIdx && tag != 0) {
|
||||
terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;
|
||||
return -1;
|
||||
} else {
|
||||
SSCHMEA_SET_IDX_ON(pTag);
|
||||
}
|
||||
} else {
|
||||
if (!IS_IDX_ON(pTag)) {
|
||||
if (hasIdx == 0) {
|
||||
terrno = TSDB_CODE_MND_SMA_NOT_EXIST;
|
||||
} else {
|
||||
SSCHMEA_SET_IDX_OFF(pTag);
|
||||
|
@ -667,7 +671,42 @@ _OVER:
|
|||
mndTransDrop(pTrans);
|
||||
return code;
|
||||
}
|
||||
int8_t mndCheckIndexNameByTagName(SMnode *pMnode, SIdxObj *pIdxObj) {
|
||||
// build index on first tag, and no index name;
|
||||
int8_t exist = 0;
|
||||
SDbObj *pDb = NULL;
|
||||
if (strlen(pIdxObj->db) > 0) {
|
||||
pDb = mndAcquireDb(pMnode, pIdxObj->db);
|
||||
if (pDb == NULL) return 0;
|
||||
}
|
||||
SSmaAndTagIter *pIter = NULL;
|
||||
SIdxObj *pIdx = NULL;
|
||||
SSdb *pSdb = pMnode->pSdb;
|
||||
|
||||
while (1) {
|
||||
pIter = sdbFetch(pSdb, SDB_IDX, pIter, (void **)&pIdx);
|
||||
if (pIter == NULL) break;
|
||||
|
||||
if (NULL != pDb && pIdx->dbUid != pDb->uid) {
|
||||
sdbRelease(pSdb, pIdx);
|
||||
continue;
|
||||
}
|
||||
if (pIdxObj->stbUid != pIdx->stbUid) {
|
||||
sdbRelease(pSdb, pIdx);
|
||||
continue;
|
||||
}
|
||||
if (strncmp(pIdxObj->colName, pIdx->colName, TSDB_COL_NAME_LEN) == 0) {
|
||||
sdbCancelFetch(pSdb, pIter);
|
||||
sdbRelease(pSdb, pIdx);
|
||||
exist = 1;
|
||||
break;
|
||||
}
|
||||
sdbRelease(pSdb, pIdx);
|
||||
}
|
||||
|
||||
mndReleaseDb(pMnode, pDb);
|
||||
return exist;
|
||||
}
|
||||
static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *req, SDbObj *pDb, SStbObj *pStb) {
|
||||
int32_t code = -1;
|
||||
SIdxObj idxObj = {0};
|
||||
|
@ -681,11 +720,20 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *re
|
|||
idxObj.stbUid = pStb->uid;
|
||||
idxObj.dbUid = pStb->dbUid;
|
||||
|
||||
int32_t tag = mndFindSuperTableTagId(pStb, req->colName);
|
||||
int8_t hasIdx = 0;
|
||||
int32_t tag = mndFindSuperTableTagId(pStb, req->colName, &hasIdx);
|
||||
if (tag < 0) {
|
||||
terrno = TSDB_CODE_MND_TAG_NOT_EXIST;
|
||||
return -1;
|
||||
} else if (tag == 0) {
|
||||
}
|
||||
int8_t exist = 0;
|
||||
if (tag == 0 && hasIdx == 1) {
|
||||
exist = mndCheckIndexNameByTagName(pMnode, &idxObj);
|
||||
if (exist) {
|
||||
terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;
|
||||
return -1;
|
||||
}
|
||||
} else if (hasIdx == 1) {
|
||||
terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;
|
||||
return -1;
|
||||
}
|
||||
|
@ -695,11 +743,11 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *re
|
|||
return -1;
|
||||
}
|
||||
|
||||
SSchema *pTag = pStb->pTags + tag;
|
||||
if (IS_IDX_ON(pTag)) {
|
||||
terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;
|
||||
return -1;
|
||||
}
|
||||
// SSchema *pTag = pStb->pTags + tag;
|
||||
// if (IS_IDX_ON(pTag)) {
|
||||
// terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;
|
||||
// return -1;
|
||||
// }
|
||||
code = mndAddIndexImpl(pMnode, pReq, pDb, pStb, &idxObj);
|
||||
|
||||
return code;
|
||||
|
@ -806,8 +854,8 @@ int32_t mndDropIdxsByStb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *p
|
|||
|
||||
if (pIdx->stbUid == pStb->uid) {
|
||||
if (mndSetDropIdxCommitLogs(pMnode, pTrans, pIdx) != 0) {
|
||||
sdbCancelFetch(pSdb, pIter);
|
||||
sdbRelease(pSdb, pIdx);
|
||||
sdbCancelFetch(pSdb, pIdx);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -58,8 +58,10 @@ static int32_t convertToRetrieveType(char *name, int32_t len) {
|
|||
type = TSDB_MGMT_TABLE_DNODE;
|
||||
} else if (strncasecmp(name, TSDB_INS_TABLE_MNODES, len) == 0) {
|
||||
type = TSDB_MGMT_TABLE_MNODE;
|
||||
/*
|
||||
} else if (strncasecmp(name, TSDB_INS_TABLE_MODULES, len) == 0) {
|
||||
type = TSDB_MGMT_TABLE_MODULE;
|
||||
*/
|
||||
} else if (strncasecmp(name, TSDB_INS_TABLE_QNODES, len) == 0) {
|
||||
type = TSDB_MGMT_TABLE_QNODE;
|
||||
} else if (strncasecmp(name, TSDB_INS_TABLE_SNODES, len) == 0) {
|
||||
|
|
|
@ -705,7 +705,10 @@ static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq) {
|
|||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
#ifdef WINDOWS
|
||||
terrno = TSDB_CODE_MND_INVALID_PLATFORM;
|
||||
goto _OVER;
|
||||
#endif
|
||||
mInfo("sma:%s, start to create", createReq.name);
|
||||
if (mndCheckCreateSmaReq(&createReq) != 0) {
|
||||
goto _OVER;
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include "mndDb.h"
|
||||
#include "mndDnode.h"
|
||||
#include "mndIndex.h"
|
||||
#include "mndIndexComm.h"
|
||||
#include "mndInfoSchema.h"
|
||||
#include "mndMnode.h"
|
||||
#include "mndPerfSchema.h"
|
||||
|
@ -822,7 +823,7 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat
|
|||
return -1;
|
||||
}
|
||||
|
||||
if(pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags){
|
||||
if (pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags) {
|
||||
terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW;
|
||||
return -1;
|
||||
}
|
||||
|
@ -834,6 +835,7 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat
|
|||
pSchema->bytes = pField->bytes;
|
||||
pSchema->flags = pField->flags;
|
||||
memcpy(pSchema->name, pField->name, TSDB_COL_NAME_LEN);
|
||||
memcpy(pSchema->comment, pField->comment, TSDB_COL_COMMENT_LEN);
|
||||
pSchema->colId = pDst->nextColId;
|
||||
pDst->nextColId++;
|
||||
}
|
||||
|
@ -847,6 +849,7 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat
|
|||
SSCHMEA_SET_IDX_ON(pSchema);
|
||||
}
|
||||
memcpy(pSchema->name, pField->name, TSDB_COL_NAME_LEN);
|
||||
memcpy(pSchema->comment, pField->comment, TSDB_COL_COMMENT_LEN);
|
||||
pSchema->colId = pDst->nextColId;
|
||||
pDst->nextColId++;
|
||||
}
|
||||
|
@ -857,11 +860,39 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea
|
|||
SStbObj stbObj = {0};
|
||||
int32_t code = -1;
|
||||
|
||||
char fullIdxName[TSDB_INDEX_FNAME_LEN * 2] = {0};
|
||||
|
||||
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq, "create-stb");
|
||||
if (pTrans == NULL) goto _OVER;
|
||||
|
||||
mInfo("trans:%d, used to create stb:%s", pTrans->id, pCreate->name);
|
||||
if (mndBuildStbFromReq(pMnode, &stbObj, pCreate, pDb) != 0) goto _OVER;
|
||||
|
||||
char randStr[24] = {0};
|
||||
taosRandStr2(randStr, tListLen(randStr) - 1);
|
||||
SSchema *pSchema = &(stbObj.pTags[0]);
|
||||
sprintf(fullIdxName, "%s.%s_%s", pDb->name, pSchema->name, randStr);
|
||||
|
||||
SSIdx idx = {0};
|
||||
if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0 && idx.pIdx != NULL) {
|
||||
terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST;
|
||||
mndReleaseIdx(pMnode, idx.pIdx);
|
||||
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
SIdxObj idxObj = {0};
|
||||
memcpy(idxObj.name, fullIdxName, TSDB_INDEX_FNAME_LEN);
|
||||
memcpy(idxObj.stb, stbObj.name, TSDB_TABLE_FNAME_LEN);
|
||||
memcpy(idxObj.db, stbObj.db, TSDB_DB_FNAME_LEN);
|
||||
memcpy(idxObj.colName, pSchema->name, TSDB_COL_NAME_LEN);
|
||||
idxObj.createdTime = taosGetTimestampMs();
|
||||
idxObj.uid = mndGenerateUid(fullIdxName, strlen(fullIdxName));
|
||||
idxObj.stbUid = stbObj.uid;
|
||||
idxObj.dbUid = stbObj.dbUid;
|
||||
|
||||
if (mndSetCreateIdxCommitLogs(pMnode, pTrans, &idxObj) < 0) goto _OVER;
|
||||
|
||||
if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) goto _OVER;
|
||||
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
|
||||
code = 0;
|
||||
|
@ -956,7 +987,7 @@ static int32_t mndBuildStbFromAlter(SStbObj *pStb, SStbObj *pDst, SMCreateStbReq
|
|||
return -1;
|
||||
}
|
||||
|
||||
if(pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags){
|
||||
if (pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags) {
|
||||
terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW;
|
||||
return -1;
|
||||
}
|
||||
|
@ -1188,7 +1219,7 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p
|
|||
return -1;
|
||||
}
|
||||
|
||||
if(pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ntags){
|
||||
if (pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ntags) {
|
||||
terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW;
|
||||
return -1;
|
||||
}
|
||||
|
@ -1478,7 +1509,8 @@ static int32_t mndAlterStbTagBytes(SMnode *pMnode, const SStbObj *pOld, SStbObj
|
|||
|
||||
SSchema *pTag = pNew->pTags + tag;
|
||||
|
||||
if (!(pTag->type == TSDB_DATA_TYPE_BINARY || pTag->type == TSDB_DATA_TYPE_NCHAR || pTag->type == TSDB_DATA_TYPE_GEOMETRY)) {
|
||||
if (!(pTag->type == TSDB_DATA_TYPE_BINARY || pTag->type == TSDB_DATA_TYPE_NCHAR ||
|
||||
pTag->type == TSDB_DATA_TYPE_GEOMETRY)) {
|
||||
terrno = TSDB_CODE_MND_INVALID_STB_OPTION;
|
||||
return -1;
|
||||
}
|
||||
|
@ -1506,7 +1538,7 @@ static int32_t mndAddSuperTableColumn(const SStbObj *pOld, SStbObj *pNew, SArray
|
|||
return -1;
|
||||
}
|
||||
|
||||
if(pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ncols){
|
||||
if (pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ncols) {
|
||||
terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW;
|
||||
return -1;
|
||||
}
|
||||
|
@ -1598,7 +1630,8 @@ static int32_t mndAlterStbColumnBytes(SMnode *pMnode, const SStbObj *pOld, SStbO
|
|||
}
|
||||
|
||||
SSchema *pCol = pNew->pColumns + col;
|
||||
if (!(pCol->type == TSDB_DATA_TYPE_BINARY || pCol->type == TSDB_DATA_TYPE_NCHAR || pCol->type == TSDB_DATA_TYPE_GEOMETRY)) {
|
||||
if (!(pCol->type == TSDB_DATA_TYPE_BINARY || pCol->type == TSDB_DATA_TYPE_NCHAR ||
|
||||
pCol->type == TSDB_DATA_TYPE_GEOMETRY)) {
|
||||
terrno = TSDB_CODE_MND_INVALID_STB_OPTION;
|
||||
return -1;
|
||||
}
|
||||
|
@ -3182,7 +3215,6 @@ static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
|
|||
SSdb *pSdb = pMnode->pSdb;
|
||||
SStbObj *pStb = NULL;
|
||||
|
||||
|
||||
int32_t numOfRows = 0;
|
||||
if (!pShow->sysDbRsp) {
|
||||
numOfRows = buildSysDbColsInfo(pBlock, pShow->db, pShow->filterTb);
|
||||
|
@ -3206,7 +3238,7 @@ static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
|
|||
if (pShow->pIter == NULL) break;
|
||||
} else {
|
||||
fetch = true;
|
||||
void *pKey = taosHashGetKey(pShow->pIter, NULL);
|
||||
void *pKey = taosHashGetKey(pShow->pIter, NULL);
|
||||
pStb = sdbAcquire(pSdb, SDB_STB, pKey);
|
||||
if (!pStb) continue;
|
||||
}
|
||||
|
|
|
@ -692,7 +692,10 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
|
|||
terrno = TSDB_CODE_INVALID_MSG;
|
||||
goto _OVER;
|
||||
}
|
||||
|
||||
#ifdef WINDOWS
|
||||
terrno = TSDB_CODE_MND_INVALID_PLATFORM;
|
||||
goto _OVER;
|
||||
#endif
|
||||
mInfo("stream:%s, start to create, sql:%s", createStreamReq.name, createStreamReq.sql);
|
||||
|
||||
if (mndCheckCreateStreamReq(&createStreamReq) != 0) {
|
||||
|
|
|
@ -489,7 +489,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
|
|||
SMqVgEp *pVgEp = taosArrayGetP(pConsumerEpNew->vgs, i);
|
||||
if(pVgEp->vgId == d1->vgId){
|
||||
jump = true;
|
||||
mInfo("pSub->offsetRows jump, because consumer id:%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId);
|
||||
mInfo("pSub->offsetRows jump, because consumer id:0x%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -77,6 +77,8 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) {
|
|||
pTask->chkInfo.version = ver;
|
||||
pTask->pMeta = pSnode->pMeta;
|
||||
|
||||
streamTaskOpenAllUpstreamInput(pTask);
|
||||
|
||||
pTask->pState = streamStateOpen(pSnode->path, pTask, false, -1, -1);
|
||||
if (pTask->pState == NULL) {
|
||||
return -1;
|
||||
|
|
|
@ -8,6 +8,7 @@ set(
"src/vnd/vnodeCommit.c"
"src/vnd/vnodeQuery.c"
"src/vnd/vnodeModule.c"
"src/vnd/vnodeCos.c"
"src/vnd/vnodeSvr.c"
"src/vnd/vnodeSync.c"
"src/vnd/vnodeSnapshot.c"

@ -155,6 +156,45 @@ target_link_libraries(
PUBLIC index
)

if(${TD_LINUX})
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
find_library(MINIXML_LIBRARY mxml)
find_library(CURL_LIBRARY curl)
target_link_libraries(
vnode

# s3
PUBLIC cos_c_sdk_static
PUBLIC ${APR_UTIL_LIBRARY}
PUBLIC ${APR_LIBRARY}
PUBLIC ${MINIXML_LIBRARY}
PUBLIC ${CURL_LIBRARY}
)

# s3
FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
IF (APR_CONFIG_BIN)
EXECUTE_PROCESS(
COMMAND ${APR_CONFIG_BIN} --includedir
OUTPUT_VARIABLE APR_INCLUDE_DIR
OUTPUT_STRIP_TRAILING_WHITESPACE
)
ENDIF()
include_directories (${APR_INCLUDE_DIR})
target_include_directories(
vnode
PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
PUBLIC "$ENV{HOME}/.cos-local.1/include"
)

if(${BUILD_WITH_COS})
add_definitions(-DUSE_COS)
endif(${BUILD_WITH_COS})

endif(${TD_LINUX})

IF (TD_GRANT)
TARGET_LINK_LIBRARIES(vnode PUBLIC grant)
ENDIF ()

@ -169,8 +209,6 @@ if(${BUILD_WITH_ROCKSDB})
add_definitions(-DUSE_ROCKSDB)
endif(${BUILD_WITH_ROCKSDB})

if(${BUILD_TEST})
add_subdirectory(test)
endif(${BUILD_TEST})
@ -0,0 +1,40 @@
/*
 * Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
 *
 * This program is free software: you can use, redistribute, and/or modify
 * it under the terms of the GNU Affero General Public License, version 3
 * or later ("AGPL"), as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _TD_VND_COS_H_
#define _TD_VND_COS_H_

#include "vnd.h"

#ifdef __cplusplus
extern "C" {
#endif

extern int8_t tsS3Enabled;

int32_t s3Init();
void s3CleanUp();
int32_t s3PutObjectFromFile(const char *file, const char *object);
void s3DeleteObjects(const char *object_name[], int nobject);
bool s3Exists(const char *object_name);
bool s3Get(const char *object_name, const char *path);
void s3EvictCache(const char *path, long object_size);
long s3Size(const char *object_name);

#ifdef __cplusplus
}
#endif

#endif /*_TD_VND_COS_H_*/
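The header above only declares the S3 helper surface added by this commit. As a rough orientation, the expected call order is: initialize once per process, push or pull objects as needed, then clean up. The sketch below is illustrative only and not part of the commit; the local path and object key are hypothetical, and error handling is reduced to early returns.

#include "vndCos.h"

// Hypothetical caller: upload a data file, then fetch it back through the local cache.
static int32_t s3UsageSketch(void) {
  if (s3Init() < 0) return -1;                            // one-time client setup (a no-op stub unless USE_COS is defined)

  const char *path = "/var/lib/taos/v2f1944ver47.data";   // hypothetical local file
  const char *object = "v2f1944ver47.data";               // hypothetical object key (base name of the file)

  int32_t code = s3PutObjectFromFile(path, object);       // upload the file as an object
  if (code == 0 && s3Exists(object)) {
    long size = s3Size(object);                           // object length, taken from a HEAD request
    s3EvictCache(path, size);                             // free local space if the disk is nearly full
    s3Get(object, path);                                  // download the object back to the local path
  }

  s3CleanUp();                                            // tear down the HTTP/IO layer
  return code;
}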
@ -167,7 +167,7 @@ int metaAddIndexToSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaDropIndexFromSTable(SMeta* pMeta, int64_t version, SDropIndexReq* pReq);

int64_t metaGetTimeSeriesNum(SMeta* pMeta);
SMCtbCursor* metaOpenCtbCursor(SMeta* pMeta, tb_uid_t uid, int lock);
SMCtbCursor* metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock);
void metaCloseCtbCursor(SMCtbCursor* pCtbCur, int lock);
tb_uid_t metaCtbCursorNext(SMCtbCursor* pCtbCur);
SMStbCursor* metaOpenStbCursor(SMeta* pMeta, tb_uid_t uid);
@ -250,7 +250,6 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskTransferStateReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskScanHistoryFinishReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskScanHistoryFinishRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqCheckLogInWal(STQ* pTq, int64_t version);
@ -17,8 +17,8 @@
#include "osMemory.h"
#include "tencode.h"

void _metaReaderInit(SMetaReader* pReader, void* pVnode, int32_t flags, SStoreMeta* pAPI) {
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
void _metaReaderInit(SMetaReader *pReader, void *pVnode, int32_t flags, SStoreMeta *pAPI) {
SMeta *pMeta = ((SVnode *)pVnode)->pMeta;
metaReaderDoInit(pReader, pMeta, flags);
pReader->pAPI = pAPI;
}

@ -143,7 +143,7 @@ tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) {
int metaGetTableNameByUid(void *pVnode, uint64_t uid, char *tbName) {
int code = 0;
SMetaReader mr = {0};
metaReaderDoInit(&mr, ((SVnode*)pVnode)->pMeta, 0);
metaReaderDoInit(&mr, ((SVnode *)pVnode)->pMeta, 0);
code = metaReaderGetTableEntryByUid(&mr, uid);
if (code < 0) {
metaReaderClear(&mr);

@ -195,7 +195,7 @@ int metaGetTableUidByName(void *pVnode, char *tbName, uint64_t *uid) {
int metaGetTableTypeByName(void *pVnode, char *tbName, ETableType *tbType) {
int code = 0;
SMetaReader mr = {0};
metaReaderDoInit(&mr, ((SVnode*)pVnode)->pMeta, 0);
metaReaderDoInit(&mr, ((SVnode *)pVnode)->pMeta, 0);

code = metaGetTableEntryByName(&mr, tbName);
if (code == 0) *tbType = mr.me.type;

@ -244,7 +244,7 @@ SMTbCursor *metaOpenTbCursor(void *pVnode) {
return NULL;
}

SVnode* pVnodeObj = pVnode;
SVnode *pVnodeObj = pVnode;
// tdbTbcMoveToFirst((TBC *)pTbCur->pDbc);
pTbCur->pMeta = pVnodeObj->pMeta;
pTbCur->paused = 1;
@ -408,17 +408,9 @@ _err:
return NULL;
}

struct SMCtbCursor {
SMeta *pMeta;
TBC *pCur;
tb_uid_t suid;
void *pKey;
void *pVal;
int kLen;
int vLen;
};

SMCtbCursor *metaOpenCtbCursor(SMeta *pMeta, tb_uid_t uid, int lock) {
SMCtbCursor *metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock) {
SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
SMCtbCursor *pCtbCur = NULL;
SCtbIdxKey ctbIdxKey;
int ret = 0;

@ -435,7 +427,7 @@ SMCtbCursor *metaOpenCtbCursor(SMeta *pMeta, tb_uid_t uid, int lock) {
metaRLock(pMeta);
}

ret = tdbTbcOpen(pMeta->pCtbIdx, &pCtbCur->pCur, NULL);
ret = tdbTbcOpen(pMeta->pCtbIdx, (TBC**)&pCtbCur->pCur, NULL);
if (ret < 0) {
metaULock(pMeta);
taosMemoryFree(pCtbCur);

@ -1139,7 +1131,7 @@ int32_t metaFilterTtl(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
pCursor->type = param->type;

metaRLock(pMeta);
//ret = tdbTbcOpen(pMeta->pTtlIdx, &pCursor->pCur, NULL);
// ret = tdbTbcOpen(pMeta->pTtlIdx, &pCursor->pCur, NULL);

END:
if (pCursor->pMeta) metaULock(pCursor->pMeta);

@ -1194,7 +1186,7 @@ int32_t metaFilterTableIds(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
ret = -1;
for (int i = 0; i < oStbEntry.stbEntry.schemaTag.nCols; i++) {
SSchema *schema = oStbEntry.stbEntry.schemaTag.pSchema + i;
if (schema->colId == param->cid && param->type == schema->type && (IS_IDX_ON(schema) || i == 0)) {
if (schema->colId == param->cid && param->type == schema->type && (IS_IDX_ON(schema))) {
ret = 0;
}
}

@ -1373,7 +1365,7 @@ int32_t metaGetTableTagsByUids(void *pVnode, int64_t suid, SArray *uidList) {
}

int32_t metaGetTableTags(void *pVnode, uint64_t suid, SArray *pUidTagInfo) {
SMCtbCursor *pCur = metaOpenCtbCursor(((SVnode *)pVnode)->pMeta, suid, 1);
SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, suid, 1);

// If len > 0 means there already have uids, and we only want the
// tags of the specified tables, of which uid in the uid list. Otherwise, all table tags are retrieved and kept
@ -450,12 +450,13 @@ int metaAddIndexToSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
goto _err;
}
if (IS_IDX_ON(pNew) && !IS_IDX_ON(pOld)) {
if (diffIdx != -1) goto _err;
// if (diffIdx != -1) goto _err;
diffIdx = i;
break;
}
}

if (diffIdx == -1 || diffIdx == 0) {
if (diffIdx == -1) {
goto _err;
}

@ -586,7 +587,7 @@ int metaDropIndexFromSTable(SMeta *pMeta, int64_t version, SDropIndexReq *pReq)
for (int i = 0; i < oStbEntry.stbEntry.schemaTag.nCols; i++) {
SSchema *schema = oStbEntry.stbEntry.schemaTag.pSchema + i;
if (0 == strncmp(schema->name, pReq->colName, sizeof(pReq->colName))) {
if (i != 0 || IS_IDX_ON(schema)) {
if (IS_IDX_ON(schema)) {
pCol = schema;
}
break;

@ -2094,7 +2095,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
} else {
for (int i = 0; i < pTagSchema->nCols; i++) {
pTagColumn = &pTagSchema->pSchema[i];
if (i != 0 && !IS_IDX_ON(pTagColumn)) continue;
if (!IS_IDX_ON(pTagColumn)) continue;

STagVal tagVal = {.cid = pTagColumn->colId};
tTagGet((const STag *)pCtbEntry->ctbEntry.pTags, &tagVal);
@ -905,6 +905,7 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, int64_t version, void *pReq, void *pMsg,
tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
if (tdExecuteRSmaAsync(pSma, version, pMsg, len, inputType, *pTbSuid) < 0) {
smaError("vgId:%d, failed to process rsma submit exec 2 since: %s", SMA_VID(pSma), terrstr());
taosHashCancelIterate(uidStore.uidHash, pIter);
goto _err;
}
}
@ -930,6 +930,8 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
pTask->pMsgCb = &pTq->pVnode->msgCb;
pTask->pMeta = pTq->pStreamMeta;

streamTaskOpenAllUpstreamInput(pTask);

// backup the initial status, and set it to be TASK_STATUS__INIT
pTask->chkInfo.version = ver;
pTask->chkInfo.currentVer = ver;

@ -1274,7 +1276,9 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {

if (done) {
pTask->tsInfo.step2Start = taosGetTimestampMs();
streamTaskEndScanWAL(pTask);
qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, 0.0);
appendTranstateIntoInputQ(pTask);
streamTryExec(pTask);  // exec directly
} else {
STimeWindow* pWindow = &pTask->dataRange.window;
tqDebug("s-task:%s level:%d verRange:%" PRId64 " - %" PRId64 " window:%" PRId64 "-%" PRId64
@ -1339,44 +1343,6 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) {
return 0;
}

// notify the downstream tasks to transfer executor state after handle all history blocks.
int32_t tqProcessTaskTransferStateReq(STQ* pTq, SRpcMsg* pMsg) {
char* pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
int32_t len = pMsg->contLen - sizeof(SMsgHead);

SStreamTransferReq req = {0};

SDecoder decoder;
tDecoderInit(&decoder, (uint8_t*)pReq, len);
int32_t code = tDecodeStreamScanHistoryFinishReq(&decoder, &req);
tDecoderClear(&decoder);

tqDebug("vgId:%d start to process transfer state msg, from s-task:0x%x", pTq->pStreamMeta->vgId, req.downstreamTaskId);

SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.downstreamTaskId);
if (pTask == NULL) {
tqError("failed to find task:0x%x, it may have been dropped already. process transfer state failed", req.downstreamTaskId);
return -1;
}

int32_t remain = streamAlignTransferState(pTask);
if (remain > 0) {
tqDebug("s-task:%s receive upstream transfer state msg, remain:%d", pTask->id.idStr, remain);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
}

// transfer the ownership of executor state
tqDebug("s-task:%s all upstream tasks send transfer msg, open transfer state flag", pTask->id.idStr);
ASSERT(pTask->streamTaskId.taskId != 0 && pTask->info.fillHistory == 1);

pTask->status.transferState = true;

streamSchedExec(pTask);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
}

int32_t tqProcessTaskScanHistoryFinishReq(STQ* pTq, SRpcMsg* pMsg) {
char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
@ -1564,7 +1530,7 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
if (pTask) {
streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
return TSDB_CODE_SUCCESS;
} else {
tqDebug("vgId:%d failed to handle the dispatch rsp, since find task:0x%x failed", vgId, taskId);
return TSDB_CODE_INVALID_MSG;

@ -1708,6 +1674,8 @@ int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg) {

int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
STQ* pTq = pVnode->pTq;
int32_t vgId = pVnode->config.vgId;

SMsgHead* msgStr = pMsg->pCont;
char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);

@ -1724,7 +1692,9 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
tDecoderClear(&decoder);

int32_t taskId = req.taskId;
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.taskId);
tqDebug("vgId:%d receive dispatch msg to s-task:0x%"PRIx64"-0x%x", vgId, req.streamId, taskId);

SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, taskId);
if (pTask != NULL) {
SRpcMsg rsp = {.info = pMsg->info, .code = 0};
streamProcessDispatchMsg(pTask, &req, &rsp, false);

@ -1741,7 +1711,7 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {

FAIL:
if (pMsg->info.handle == NULL) {
tqError("s-task:0x%x vgId:%d msg handle is null, abort enqueue dispatch msg", pTq->pStreamMeta->vgId, taskId);
tqError("s-task:0x%x vgId:%d msg handle is null, abort enqueue dispatch msg", vgId, taskId);
return -1;
}
@ -60,7 +60,7 @@ int32_t tqOffsetSnapRead(STqOffsetReader* pReader, uint8_t** ppData) {
}

int64_t sz = 0;
if (taosStatFile(fname, &sz, NULL) < 0) {
if (taosStatFile(fname, &sz, NULL, NULL) < 0) {
taosCloseFile(&pFile);
taosMemoryFree(fname);
return -1;
@ -332,8 +332,12 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con
void* pBody = POINTER_SHIFT(pReader->pHead->head.body, sizeof(SMsgHead));
int32_t len = pReader->pHead->head.bodyLen - sizeof(SMsgHead);

extractDelDataBlock(pBody, len, ver, (SStreamRefDataBlock**)pItem);
tqDebug("s-task:%s delete msg extract from WAL, len:%d, ver:%"PRId64, id, len, ver);
code = extractDelDataBlock(pBody, len, ver, (SStreamRefDataBlock**)pItem);
if (code != TSDB_CODE_SUCCESS) {
tqError("s-task:%s extract delete msg from WAL failed, code:%s", id, tstrerror(code));
} else {
tqDebug("s-task:%s delete msg extract from WAL, len:%d, ver:%"PRId64, id, len, ver);
}
} else {
ASSERT(0);
}

@ -1088,6 +1092,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
if(ret != TDB_CODE_SUCCESS) {
tqError("qGetTableList in tqUpdateTbUidList error:%d handle %s consumer:0x%" PRIx64, ret, pTqHandle->subKey, pTqHandle->consumerId);
taosArrayDestroy(list);
taosHashCancelIterate(pTq->pHandle, pIter);
return ret;
}
tqReaderSetTbUidList(pTqHandle->execHandle.pTqReader, list, NULL);
@ -210,13 +210,23 @@ int32_t doSetOffsetForWalReader(SStreamTask *pTask, int32_t vgId) {
}

static void checkForFillHistoryVerRange(SStreamTask* pTask, int64_t ver) {
if ((pTask->info.fillHistory == 1) && ver > pTask->dataRange.range.maxVer) {
qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64
", not scan wal anymore, set the transfer state flag",
pTask->id.idStr, ver, pTask->dataRange.range.maxVer);
pTask->status.transferState = true;
const char* id = pTask->id.idStr;
int64_t maxVer = pTask->dataRange.range.maxVer;

/*int32_t code = */streamSchedExec(pTask);
if ((pTask->info.fillHistory == 1) && ver > pTask->dataRange.range.maxVer) {
if (!pTask->status.appendTranstateBlock) {
qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64
", not scan wal anymore, add transfer-state block into inputQ",
id, ver, maxVer);

double el = (taosGetTimestampMs() - pTask->tsInfo.step2Start) / 1000.0;
qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, el);
appendTranstateIntoInputQ(pTask);
/*int32_t code = */streamSchedExec(pTask);
} else {
qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64 ", not scan wal",
id, ver, maxVer);
}
}
}

@ -262,7 +272,7 @@ int32_t createStreamTaskRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) {
continue;
}

if ((pTask->info.fillHistory == 1) && pTask->status.transferState) {
if ((pTask->info.fillHistory == 1) && pTask->status.appendTranstateBlock) {
ASSERT(status == TASK_STATUS__NORMAL);
// the maximum version of data in the WAL has reached already, the step2 is done
tqDebug("s-task:%s fill-history reach the maximum ver:%" PRId64 ", not scan wal anymore", pTask->id.idStr,

@ -277,6 +287,13 @@ int32_t createStreamTaskRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) {
continue;
}

// downstream task has blocked the output, stopped for a while
if (pTask->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
tqDebug("s-task:%s inputQ is blocked, do nothing", pTask->id.idStr);
streamMetaReleaseTask(pStreamMeta, pTask);
continue;
}

*pScanIdle = false;

// seek the stored version and extract data from WAL
@ -412,7 +412,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
if (k == 0) {
SColumnInfoData* pColData = taosArrayGet(pDataBlock->pDataBlock, dataIndex);
void* colData = colDataGetData(pColData, j);
tqTrace("tq sink pipe2, row %d, col %d ts %" PRId64, j, k, *(int64_t*)colData);
tqDebug("tq sink pipe2, row %d, col %d ts %" PRId64, j, k, *(int64_t*)colData);
}
if (IS_SET_NULL(pCol)) {
SColVal cv = COL_VAL_NULL(pCol->colId, pCol->type);
@ -176,7 +176,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) {
// SDelFile
if (pTsdb->fs.pDelFile) {
tsdbDelFileName(pTsdb, pTsdb->fs.pDelFile, fname);
if (taosStatFile(fname, &size, NULL)) {
if (taosStatFile(fname, &size, NULL, NULL)) {
code = TAOS_SYSTEM_ERROR(errno);
TSDB_CHECK_CODE(code, lino, _exit);
}

@ -195,7 +195,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) {

// head =========
tsdbHeadFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pHeadF, fname);
if (taosStatFile(fname, &size, NULL)) {
if (taosStatFile(fname, &size, NULL, NULL)) {
code = TAOS_SYSTEM_ERROR(errno);
TSDB_CHECK_CODE(code, lino, _exit);
}

@ -206,7 +206,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) {

// data =========
tsdbDataFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pDataF, fname);
if (taosStatFile(fname, &size, NULL)) {
if (taosStatFile(fname, &size, NULL, NULL)) {
code = TAOS_SYSTEM_ERROR(errno);
TSDB_CHECK_CODE(code, lino, _exit);
}

@ -221,7 +221,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) {

// sma =============
tsdbSmaFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pSmaF, fname);
if (taosStatFile(fname, &size, NULL)) {
if (taosStatFile(fname, &size, NULL, NULL)) {
code = TAOS_SYSTEM_ERROR(errno);
TSDB_CHECK_CODE(code, lino, _exit);
}

@ -237,7 +237,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) {
// stt ===========
for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
tsdbSttFileName(pTsdb, pSet->diskId, pSet->fid, pSet->aSttF[iStt], fname);
if (taosStatFile(fname, &size, NULL)) {
if (taosStatFile(fname, &size, NULL, NULL)) {
code = TAOS_SYSTEM_ERROR(errno);
TSDB_CHECK_CODE(code, lino, _exit);
}
@ -14,6 +14,7 @@
*/

#include "tsdb.h"
#include "vndCos.h"

// =============== PAGE-WISE FILE ===============
int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **ppFD) {

@ -34,9 +35,24 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p
pFD->flag = flag;
pFD->pFD = taosOpenFile(path, flag);
if (pFD->pFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
taosMemoryFree(pFD);
goto _exit;
const char *object_name = taosDirEntryBaseName((char *)path);
long s3_size = s3Size(object_name);
if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3_size > 0) {
s3EvictCache(path, s3_size);
s3Get(object_name, path);

pFD->pFD = taosOpenFile(path, flag);

if (pFD->pFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
taosMemoryFree(pFD);
goto _exit;
}
} else {
code = TAOS_SYSTEM_ERROR(errno);
taosMemoryFree(pFD);
goto _exit;
}
}
pFD->szPage = szPage;
pFD->pgno = 0;

@ -50,7 +66,7 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p

// not check file size when reading data files.
if (flag != TD_FILE_READ) {
if (taosStatFile(path, &pFD->szFile, NULL) < 0) {
if (taosStatFile(path, &pFD->szFile, NULL, NULL) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
taosMemoryFree(pFD->pBuf);
taosCloseFile(&pFD->pFD);
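The hunk above adds a read-through path to tsdbOpenFile: when the local open of a .data file fails but the object still exists on S3, the vnode frees cache space, downloads the object, and retries the open. The helper below is not part of the commit; it is a condensed restatement of that fallback under a hypothetical name, using only calls that appear in this diff.

// Hypothetical helper restating the fallback added to tsdbOpenFile above.
static TdFilePtr tsdbOpenWithS3Fallback(const char *path, int32_t flag) {
  TdFilePtr pFD = taosOpenFile(path, flag);
  if (pFD != NULL) return pFD;

  const char *object_name = taosDirEntryBaseName((char *)path);
  long        s3_size = s3Size(object_name);   // > 0 means the object exists remotely
  size_t      len = strlen(path);

  if (len >= 5 && strncmp(path + len - 5, ".data", 5) == 0 && s3_size > 0) {
    s3EvictCache(path, s3_size);   // evict cold .data files if the local disk is nearly full
    s3Get(object_name, path);      // pull the object back to the local path
    pFD = taosOpenFile(path, flag);
  }
  return pFD;  // NULL means both the local open and the S3 fallback failed
}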
@ -15,6 +15,7 @@

#include "tsdb.h"
#include "tsdbFS2.h"
#include "vndCos.h"

typedef struct {
STsdb *tsdb;

@ -41,6 +42,28 @@ static int32_t tsdbDoRemoveFileObject(SRTNer *rtner, const STFileObj *fobj) {
return TARRAY2_APPEND(rtner->fopArr, op);
}

static int32_t tsdbRemoveFileObjectS3(SRTNer *rtner, const STFileObj *fobj) {
int32_t code = 0, lino = 0;

STFileOp op = {
.optype = TSDB_FOP_REMOVE,
.fid = fobj->f->fid,
.of = fobj->f[0],
};

code = TARRAY2_APPEND(rtner->fopArr, op);
TSDB_CHECK_CODE(code, lino, _exit);

const char *object_name = taosDirEntryBaseName((char *)fobj->fname);
s3DeleteObjects(&object_name, 1);

_exit:
if (code) {
TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code);
}
return code;
}

static int32_t tsdbDoCopyFile(SRTNer *rtner, const STFileObj *from, const STFile *to) {
int32_t code = 0;
int32_t lino = 0;
@ -76,6 +99,34 @@ _exit:
return code;
}

static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile *to) {
int32_t code = 0;
int32_t lino = 0;

char fname[TSDB_FILENAME_LEN];
TdFilePtr fdFrom = NULL;
TdFilePtr fdTo = NULL;

tsdbTFileName(rtner->tsdb, to, fname);

fdFrom = taosOpenFile(from->fname, TD_FILE_READ);
if (fdFrom == NULL) code = terrno;
TSDB_CHECK_CODE(code, lino, _exit);

char *object_name = taosDirEntryBaseName(fname);
code = s3PutObjectFromFile(from->fname, object_name);
TSDB_CHECK_CODE(code, lino, _exit);

taosCloseFile(&fdFrom);

_exit:
if (code) {
TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code);
taosCloseFile(&fdFrom);
}
return code;
}

static int32_t tsdbDoMigrateFileObj(SRTNer *rtner, const STFileObj *fobj, const SDiskID *did) {
int32_t code = 0;
int32_t lino = 0;
@ -123,6 +174,53 @@ _exit:
return code;
}

static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const SDiskID *did) {
int32_t code = 0;
int32_t lino = 0;
STFileOp op = {0};

// remove old
op = (STFileOp){
.optype = TSDB_FOP_REMOVE,
.fid = fobj->f->fid,
.of = fobj->f[0],
};

code = TARRAY2_APPEND(rtner->fopArr, op);
TSDB_CHECK_CODE(code, lino, _exit);

// create new
op = (STFileOp){
.optype = TSDB_FOP_CREATE,
.fid = fobj->f->fid,
.nf =
{
.type = fobj->f->type,
.did = did[0],
.fid = fobj->f->fid,
.cid = fobj->f->cid,
.size = fobj->f->size,
.stt[0] =
{
.level = fobj->f->stt[0].level,
},
},
};

code = TARRAY2_APPEND(rtner->fopArr, op);
TSDB_CHECK_CODE(code, lino, _exit);

// do copy the file
code = tsdbCopyFileS3(rtner, fobj, &op.nf);
TSDB_CHECK_CODE(code, lino, _exit);

_exit:
if (code) {
TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code);
}
return code;
}

typedef struct {
STsdb *tsdb;
int32_t sync;
@ -201,8 +299,14 @@ static int32_t tsdbDoRetention2(void *arg) {
for (int32_t ftype = 0; (ftype < TSDB_FTYPE_MAX) && (fobj = rtner->ctx->fset->farr[ftype], 1); ++ftype) {
if (fobj == NULL) continue;

code = tsdbDoRemoveFileObject(rtner, fobj);
TSDB_CHECK_CODE(code, lino, _exit);
int32_t nlevel = tfsGetLevel(rtner->tsdb->pVnode->pTfs);
if (tsS3Enabled && nlevel > 1 && TSDB_FTYPE_DATA == ftype && fobj->f->did.level == nlevel - 1) {
code = tsdbRemoveFileObjectS3(rtner, fobj);
TSDB_CHECK_CODE(code, lino, _exit);
} else {
code = tsdbDoRemoveFileObject(rtner, fobj);
TSDB_CHECK_CODE(code, lino, _exit);
}
}

SSttLvl *lvl;

@ -228,8 +332,15 @@ static int32_t tsdbDoRetention2(void *arg) {
if (fobj == NULL) continue;

if (fobj->f->did.level == did.level) continue;
code = tsdbDoMigrateFileObj(rtner, fobj, &did);
TSDB_CHECK_CODE(code, lino, _exit);

int32_t nlevel = tfsGetLevel(rtner->tsdb->pVnode->pTfs);
if (tsS3Enabled && nlevel > 1 && TSDB_FTYPE_DATA == ftype && did.level == nlevel - 1) {
code = tsdbMigrateDataFileS3(rtner, fobj, &did);
TSDB_CHECK_CODE(code, lino, _exit);
} else {
code = tsdbDoMigrateFileObj(rtner, fobj, &did);
TSDB_CHECK_CODE(code, lino, _exit);
}
}

// stt

@ -281,4 +392,4 @@ int32_t tsdbRetention(STsdb *tsdb, int64_t now, int32_t sync) {
tsdbFreeRtnArg(arg);
}
return code;
}
}
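The retention hunks above apply the same routing rule twice: data files that land on the last configured storage tier go through the S3 remove/migrate variants, everything else stays on the local path. The predicate below is not in the commit; it is a hypothetical helper that only names that condition, built from identifiers already used above.

// Hypothetical predicate naming the routing rule used in tsdbDoRetention2 above:
// only TSDB_FTYPE_DATA files on (or headed to) the last tier use the S3 variants,
// and only when S3 support is enabled.
static bool tsdbShouldUseS3(STsdb *tsdb, int32_t ftype, int32_t fileLevel) {
  int32_t nlevel = tfsGetLevel(tsdb->pVnode->pTfs);
  return tsS3Enabled && nlevel > 1 && ftype == TSDB_FTYPE_DATA && fileLevel == nlevel - 1;
}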
@ -392,6 +392,9 @@ static int32_t tsdbSnapReadTombData(STsdbSnapReader* reader, uint8_t** data) {
code = tTombBlockPut(reader->tombBlock, record);
TSDB_CHECK_CODE(code, lino, _exit);

code = tsdbIterMergerNext(reader->tombIterMerger);
TSDB_CHECK_CODE(code, lino, _exit);

if (TOMB_BLOCK_SIZE(reader->tombBlock) >= 81920) {
break;
}

@ -76,7 +76,7 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq2 *pMsg) {
int32_t code = 0;
STsdbKeepCfg *pCfg = &pTsdb->keepCfg;
TSKEY now = taosGetTimestamp(pCfg->precision);
TSKEY minKey = now - tsTickPerMin[pCfg->precision] * pCfg->keep2;
TSKEY minKey = now - tsTickPerMin[pCfg->precision] * pCfg->keep1;
TSKEY maxKey = tsMaxKeyByPrecision[pCfg->precision];
int32_t size = taosArrayGetSize(pMsg->aSubmitTbData);

@ -107,4 +107,4 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq2 *pMsg) {

_exit:
return code;
}
}
@ -0,0 +1,323 @@
#define ALLOW_FORBID_FUNC

#include "vndCos.h"

extern char tsS3Endpoint[];
extern char tsS3AccessKeyId[];
extern char tsS3AccessKeySecret[];
extern char tsS3BucketName[];
extern char tsS3AppId[];

#ifdef USE_COS
#include "cos_api.h"
#include "cos_http_io.h"
#include "cos_log.h"

int32_t s3Init() {
if (cos_http_io_initialize(NULL, 0) != COSE_OK) {
return -1;
}

// set log level, default COS_LOG_WARN
cos_log_set_level(COS_LOG_WARN);

// set log output, default stderr
cos_log_set_output(NULL);

return 0;
}

void s3CleanUp() { cos_http_io_deinitialize(); }

static void log_status(cos_status_t *s) {
cos_warn_log("status->code: %d", s->code);
if (s->error_code) cos_warn_log("status->error_code: %s", s->error_code);
if (s->error_msg) cos_warn_log("status->error_msg: %s", s->error_msg);
if (s->req_id) cos_warn_log("status->req_id: %s", s->req_id);
}

static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) {
options->config = cos_config_create(options->pool);

cos_config_t *config = options->config;

cos_str_set(&config->endpoint, tsS3Endpoint);
cos_str_set(&config->access_key_id, tsS3AccessKeyId);
cos_str_set(&config->access_key_secret, tsS3AccessKeySecret);
cos_str_set(&config->appid, tsS3AppId);

config->is_cname = is_cname;

options->ctl = cos_http_controller_create(options->pool, 0);
}

int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) {
int32_t code = 0;
cos_pool_t *p = NULL;
int is_cname = 0;
cos_status_t *s = NULL;
cos_request_options_t *options = NULL;
cos_string_t bucket, object, file;
cos_table_t *resp_headers;
int traffic_limit = 0;

cos_pool_create(&p, NULL);
options = cos_request_options_create(p);
s3InitRequestOptions(options, is_cname);
cos_table_t *headers = NULL;
if (traffic_limit) {
// the rate limit must be within 819200 - 838860800, i.e. 100KB/s - 100MB/s; values outside this range return a 400 error
headers = cos_table_make(p, 1);
cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
}
cos_str_set(&bucket, tsS3BucketName);
cos_str_set(&file, file_str);
cos_str_set(&object, object_str);
s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers);
log_status(s);

cos_pool_destroy(p);

if (s->code != 200) {
return code = s->code;
}

return code;
}
void s3DeleteObjects(const char *object_name[], int nobject) {
cos_pool_t *p = NULL;
int is_cname = 0;
cos_string_t bucket;
cos_table_t *resp_headers = NULL;
cos_request_options_t *options = NULL;
cos_list_t object_list;
cos_list_t deleted_object_list;
int is_quiet = COS_TRUE;

cos_pool_create(&p, NULL);
options = cos_request_options_create(p);
s3InitRequestOptions(options, is_cname);
cos_str_set(&bucket, tsS3BucketName);

cos_list_init(&object_list);
cos_list_init(&deleted_object_list);

for (int i = 0; i < nobject; ++i) {
cos_object_key_t *content = cos_create_cos_object_key(p);
cos_str_set(&content->key, object_name[i]);
cos_list_add_tail(&content->node, &object_list);
}

cos_status_t *s = cos_delete_objects(options, &bucket, &object_list, is_quiet, &resp_headers, &deleted_object_list);
log_status(s);

cos_pool_destroy(p);

if (cos_status_is_ok(s)) {
cos_warn_log("delete objects succeeded\n");
} else {
cos_warn_log("delete objects failed\n");
}
}

bool s3Exists(const char *object_name) {
bool ret = false;
cos_pool_t *p = NULL;
int is_cname = 0;
cos_status_t *s = NULL;
cos_request_options_t *options = NULL;
cos_string_t bucket;
cos_string_t object;
cos_table_t *resp_headers;
cos_table_t *headers = NULL;
cos_object_exist_status_e object_exist;

cos_pool_create(&p, NULL);
options = cos_request_options_create(p);
s3InitRequestOptions(options, is_cname);
cos_str_set(&bucket, tsS3BucketName);
cos_str_set(&object, object_name);

s = cos_check_object_exist(options, &bucket, &object, headers, &object_exist, &resp_headers);
if (object_exist == COS_OBJECT_NON_EXIST) {
cos_warn_log("object: %.*s non exist.\n", object.len, object.data);
} else if (object_exist == COS_OBJECT_EXIST) {
ret = true;
cos_warn_log("object: %.*s exist.\n", object.len, object.data);
} else {
cos_warn_log("object: %.*s unknown status.\n", object.len, object.data);
log_status(s);
}

cos_pool_destroy(p);

return ret;
}
bool s3Get(const char *object_name, const char *path) {
bool ret = false;
cos_pool_t *p = NULL;
int is_cname = 0;
cos_status_t *s = NULL;
cos_request_options_t *options = NULL;
cos_string_t bucket;
cos_string_t object;
cos_string_t file;
cos_table_t *resp_headers = NULL;
cos_table_t *headers = NULL;
int traffic_limit = 0;

// create the memory pool
cos_pool_create(&p, NULL);

// initialize the request options
options = cos_request_options_create(p);
s3InitRequestOptions(options, is_cname);
cos_str_set(&bucket, tsS3BucketName);
if (traffic_limit) {
// the rate limit must be within 819200 - 838860800, i.e. 100KB/s - 100MB/s; values outside this range return a 400 error
headers = cos_table_make(p, 1);
cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
}

// download the object
cos_str_set(&file, path);
cos_str_set(&object, object_name);
s = cos_get_object_to_file(options, &bucket, &object, headers, NULL, &file, &resp_headers);
if (cos_status_is_ok(s)) {
ret = true;
cos_warn_log("get object succeeded\n");
} else {
cos_warn_log("get object failed\n");
}

// destroy the memory pool
cos_pool_destroy(p);

return ret;
}
typedef struct {
int64_t size;
int32_t atime;
char name[TSDB_FILENAME_LEN];
} SEvictFile;

static int32_t evictFileCompareAsce(const void *pLeft, const void *pRight) {
SEvictFile *lhs = (SEvictFile *)pLeft;
SEvictFile *rhs = (SEvictFile *)pRight;
return lhs->atime < rhs->atime ? -1 : 1;
}

void s3EvictCache(const char *path, long object_size) {
SDiskSize disk_size = {0};
char dir_name[TSDB_FILENAME_LEN] = "\0";

tstrncpy(dir_name, path, TSDB_FILENAME_LEN);
taosDirName(dir_name);

if (taosGetDiskSize((char *)dir_name, &disk_size) < 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
vError("failed to get disk:%s size since %s", path, terrstr());
return;
}

if (object_size >= disk_size.avail - (1 << 30)) {
// evict too old files
// 1, list data files' atime under dir(path)
tdbDirPtr pDir = taosOpenDir(dir_name);
if (pDir == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
vError("failed to open %s since %s", dir_name, terrstr());
}
SArray *evict_files = taosArrayInit(16, sizeof(SEvictFile));
tdbDirEntryPtr pDirEntry;
while ((pDirEntry = taosReadDir(pDir)) != NULL) {
char *name = taosGetDirEntryName(pDirEntry);
if (!strncmp(name + strlen(name) - 5, ".data", 5)) {
SEvictFile e_file = {0};
char entry_name[TSDB_FILENAME_LEN] = "\0";
int dir_len = strlen(dir_name);

memcpy(e_file.name, dir_name, dir_len);
e_file.name[dir_len] = '/';
memcpy(e_file.name + dir_len + 1, name, strlen(name));

taosStatFile(e_file.name, &e_file.size, NULL, &e_file.atime);

taosArrayPush(evict_files, &e_file);
}
}
taosCloseDir(&pDir);

// 2, sort by atime
taosArraySort(evict_files, evictFileCompareAsce);

// 3, remove files ascendingly until we get enough object_size space
long evict_size = 0;
size_t ef_size = TARRAY_SIZE(evict_files);
for (size_t i = 0; i < ef_size; ++i) {
SEvictFile *evict_file = taosArrayGet(evict_files, i);
taosRemoveFile(evict_file->name);
evict_size += evict_file->size;
if (evict_size >= object_size) {
break;
}
}

taosArrayDestroy(evict_files);
}
}
long s3Size(const char *object_name) {
long size = 0;

cos_pool_t *p = NULL;
int is_cname = 0;
cos_status_t *s = NULL;
cos_request_options_t *options = NULL;
cos_string_t bucket;
cos_string_t object;
cos_table_t *resp_headers = NULL;

// create the memory pool
cos_pool_create(&p, NULL);

// initialize the request options
options = cos_request_options_create(p);
s3InitRequestOptions(options, is_cname);
cos_str_set(&bucket, tsS3BucketName);

// fetch the object metadata
cos_str_set(&object, object_name);
s = cos_head_object(options, &bucket, &object, NULL, &resp_headers);
// print_headers(resp_headers);
if (cos_status_is_ok(s)) {
char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH);
if (content_length_str != NULL) {
size = atol(content_length_str);
}
cos_warn_log("head object succeeded: %ld\n", size);
} else {
cos_warn_log("head object failed\n");
}

// destroy the memory pool
cos_pool_destroy(p);

return size;
}

#else

int32_t s3Init() { return 0; }
void s3CleanUp() {}
int32_t s3PutObjectFromFile(const char *file, const char *object) { return 0; }
void s3DeleteObjects(const char *object_name[], int nobject) {}
bool s3Exists(const char *object_name) { return false; }
bool s3Get(const char *object_name, const char *path) { return false; }
void s3EvictCache(const char *path, long object_size) {}
long s3Size(const char *object_name) { return 0; }

#endif
@ -96,6 +96,10 @@ void initMetadataAPI(SStoreMeta* pMeta) {

pMeta->metaGetCachedTbGroup = metaGetCachedTbGroup;
pMeta->metaPutTbGroupToCache = metaPutTbGroupToCache;

pMeta->openCtbCursor = metaOpenCtbCursor;
pMeta->closeCtbCursor = metaCloseCtbCursor;
pMeta->ctbCursorNext = metaCtbCursorNext;
}

void initTqAPI(SStoreTqReader* pTq) {
@ -14,6 +14,7 @@
*/

#include "vnd.h"
#include "vndCos.h"

typedef struct SVnodeTask SVnodeTask;
struct SVnodeTask {

@ -81,6 +82,9 @@ int vnodeInit(int nthreads) {
if (tqInit() < 0) {
return -1;
}
if (s3Init() < 0) {
return -1;
}

return 0;
}

@ -112,6 +116,7 @@ void vnodeCleanup() {
walCleanUp();
tqCleanUp();
smaCleanUp();
s3CleanUp();
}

int vnodeScheduleTaskEx(int tpid, int (*execute)(void*), void* arg) {
@ -216,7 +216,7 @@ int vnodeGetTableCfg(SVnode *pVnode, SRpcMsg *pMsg, bool direct) {

cfgRsp.numOfTags = schemaTag.nCols;
cfgRsp.numOfColumns = schema.nCols;
cfgRsp.pSchemas = (SSchema *)taosMemoryMalloc(sizeof(SSchema) * (cfgRsp.numOfColumns + cfgRsp.numOfTags));
cfgRsp.pSchemas = (SSchema *)taosMemoryCalloc(cfgRsp.numOfColumns + cfgRsp.numOfTags, sizeof(SSchema));

memcpy(cfgRsp.pSchemas, schema.pSchema, sizeof(SSchema) * schema.nCols);
if (schemaTag.nCols) {

@ -443,7 +443,7 @@ int32_t vnodeGetTableList(void* pVnode, int8_t type, SArray* pList) {
}

int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list) {
SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, uid, 1);
SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, uid, 1);

while (1) {
tb_uid_t id = metaCtbCursorNext(pCur);

@ -465,7 +465,7 @@ int32_t vnodeGetCtbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bo

int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list) {
SVnode *pVnodeObj = pVnode;
SMCtbCursor *pCur = metaOpenCtbCursor(pVnodeObj->pMeta, suid, 1);
SMCtbCursor *pCur = metaOpenCtbCursor(pVnodeObj, suid, 1);

while (1) {
tb_uid_t id = metaCtbCursorNext(pCur);

@ -524,7 +524,7 @@ int32_t vnodeGetStbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bo
}

int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) {
SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, suid, 0);
SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, suid, 0);
if (!pCur) {
return TSDB_CODE_FAILED;
}

@ -661,8 +661,6 @@ int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo)
return tqProcessTaskRetrieveRsp(pVnode->pTq, pMsg);
case TDMT_VND_STREAM_SCAN_HISTORY:
return tqProcessTaskScanHistory(pVnode->pTq, pMsg);
case TDMT_STREAM_TRANSFER_STATE:
return tqProcessTaskTransferStateReq(pVnode->pTq, pMsg);
case TDMT_STREAM_SCAN_HISTORY_FINISH:
return tqProcessTaskScanHistoryFinishReq(pVnode->pTq, pMsg);
case TDMT_STREAM_SCAN_HISTORY_FINISH_RSP:
@ -78,6 +78,10 @@ static int32_t buildDescResultDataBlock(SSDataBlock** pOutput) {
infoData = createColumnInfoData(TSDB_DATA_TYPE_VARCHAR, DESCRIBE_RESULT_NOTE_LEN, 4);
code = blockDataAppendColInfo(pBlock, &infoData);
}
if (TSDB_CODE_SUCCESS == code) {
infoData = createColumnInfoData(TSDB_DATA_TYPE_VARCHAR, DESCRIBE_RESULT_COL_COMMENT_LEN, 5);
code = blockDataAppendColInfo(pBlock, &infoData);
}

if (TSDB_CODE_SUCCESS == code) {
*pOutput = pBlock;

@ -99,7 +103,9 @@ static int32_t setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock,
SColumnInfoData* pCol3 = taosArrayGet(pBlock->pDataBlock, 2);
// Note
SColumnInfoData* pCol4 = taosArrayGet(pBlock->pDataBlock, 3);
char buf[DESCRIBE_RESULT_FIELD_LEN] = {0};
// Comment
SColumnInfoData* pCol5 = taosArrayGet(pBlock->pDataBlock, 4);
char buf[DESCRIBE_RESULT_COL_COMMENT_LEN + VARSTR_HEADER_SIZE] = {0};
for (int32_t i = 0; i < numOfRows; ++i) {
if (invisibleColumn(sysInfoUser, pMeta->tableType, pMeta->schema[i].flags)) {
continue;

@ -112,6 +118,8 @@ static int32_t setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock,
colDataSetVal(pCol3, pBlock->info.rows, (const char*)&bytes, false);
STR_TO_VARSTR(buf, i >= pMeta->tableInfo.numOfColumns ? "TAG" : "");
colDataSetVal(pCol4, pBlock->info.rows, buf, false);
STR_TO_VARSTR(buf, pMeta->schema[i].comment);
colDataSetVal(pCol5, pBlock->info.rows, buf, false);
++(pBlock->info.rows);
}
if (pBlock->info.rows <= 0) {
@ -456,14 +464,19 @@ void appendColumnFields(char* buf, int32_t* len, STableCfg* pCfg) {
for (int32_t i = 0; i < pCfg->numOfColumns; ++i) {
SSchema* pSchema = pCfg->pSchemas + i;
char type[32];
char comments[TSDB_COL_COMMENT_LEN + 16] = {0};
sprintf(type, "%s", tDataTypes[pSchema->type].name);
if (TSDB_DATA_TYPE_VARCHAR == pSchema->type || TSDB_DATA_TYPE_GEOMETRY == pSchema->type) {
sprintf(type + strlen(type), "(%d)", (int32_t)(pSchema->bytes - VARSTR_HEADER_SIZE));
} else if (TSDB_DATA_TYPE_NCHAR == pSchema->type) {
sprintf(type + strlen(type), "(%d)", (int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE));
}
if (pSchema->comment[0]) {
sprintf(comments, " COMMENT '%s'", pSchema->comment);
}

*len += sprintf(buf + VARSTR_HEADER_SIZE + *len, "%s`%s` %s", ((i > 0) ? ", " : ""), pSchema->name, type);
*len +=
sprintf(buf + VARSTR_HEADER_SIZE + *len, "%s`%s` %s%s", ((i > 0) ? ", " : ""), pSchema->name, type, comments);
}
}

@ -471,14 +484,18 @@ void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) {
for (int32_t i = 0; i < pCfg->numOfTags; ++i) {
SSchema* pSchema = pCfg->pSchemas + pCfg->numOfColumns + i;
char type[32];
char comments[TSDB_COL_COMMENT_LEN + 16] = {0};
sprintf(type, "%s", tDataTypes[pSchema->type].name);
if (TSDB_DATA_TYPE_VARCHAR == pSchema->type || TSDB_DATA_TYPE_GEOMETRY == pSchema->type) {
sprintf(type + strlen(type), "(%d)", (int32_t)(pSchema->bytes - VARSTR_HEADER_SIZE));
} else if (TSDB_DATA_TYPE_NCHAR == pSchema->type) {
sprintf(type + strlen(type), "(%d)", (int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE));
}
if (pSchema->comment[0]) {
sprintf(comments, " COMMENT '%s'", pSchema->comment);
}

*len += sprintf(buf + VARSTR_HEADER_SIZE + *len, "%s`%s` %s", ((i > 0) ? ", " : ""), pSchema->name, type);
*len += sprintf(buf + VARSTR_HEADER_SIZE + *len, "%s`%s` %s%s", ((i > 0) ? ", " : ""), pSchema->name, type, comments);
}
}

@ -624,7 +641,7 @@ void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STableCfg*
}
}

if (nSma < pCfg->numOfColumns) {
if (nSma < pCfg->numOfColumns && nSma > 0) {
bool smaOn = false;
*len += sprintf(buf + VARSTR_HEADER_SIZE + *len, " SMA(");
for (int32_t i = 0; i < pCfg->numOfColumns; ++i) {
@ -291,17 +291,17 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
switch (pNode->type) {
case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: {
STagScanPhysiNode *pTagScanNode = (STagScanPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_TAG_SCAN_FORMAT, pTagScanNode->tableName.tname);
EXPLAIN_ROW_NEW(level, EXPLAIN_TAG_SCAN_FORMAT, pTagScanNode->scan.tableName.tname);
EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
}
if (pTagScanNode->pScanPseudoCols) {
EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->pScanPseudoCols->length);
if (pTagScanNode->scan.pScanPseudoCols) {
EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->scan.pScanPseudoCols->length);
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
}
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->totalRowSize);
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->scan.node.pOutputDataBlockDesc->totalRowSize);
EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));

@ -309,11 +309,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
if (verbose) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT,
nodesGetOutputNumFromSlotList(pTagScanNode->node.pOutputDataBlockDesc->pSlots));
nodesGetOutputNumFromSlotList(pTagScanNode->scan.node.pOutputDataBlockDesc->pSlots));
EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT);
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->outputRowSize);
EXPLAIN_ROW_APPEND_LIMIT(pTagScanNode->node.pLimit);
EXPLAIN_ROW_APPEND_SLIMIT(pTagScanNode->node.pSlimit);
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->scan.node.pOutputDataBlockDesc->outputRowSize);
EXPLAIN_ROW_APPEND_LIMIT(pTagScanNode->scan.node.pLimit);
EXPLAIN_ROW_APPEND_SLIMIT(pTagScanNode->scan.node.pSlimit);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
@ -183,11 +183,17 @@ void cleanupQueryTableDataCond(SQueryTableDataCond* pCond);

int32_t convertFillType(int32_t mode);
int32_t resultrowComparAsc(const void* p1, const void* p2);
int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified, SStorageAPI *pAPI);

void printDataBlock(SSDataBlock* pBlock, const char* flag);
int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified, SStorageAPI* pAPI);
char* getStreamOpName(uint16_t opType);
void printDataBlock(SSDataBlock* pBlock, const char* flag, const char* taskIdStr);
void printSpecDataBlock(SSDataBlock* pBlock, const char* flag, const char* opStr, const char* taskIdStr);

void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order);
void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery);

TSKEY getStartTsKey(STimeWindow* win, const TSKEY* tsCols);
void updateTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pWin, int64_t delta);

SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode,
SStorageAPI* pStorageAPI);
#endif  // TDENGINE_EXECUTIL_H
@ -251,6 +251,12 @@ typedef struct STableMergeScanInfo {
SSortExecInfo sortExecInfo;
} STableMergeScanInfo;

typedef struct STagScanFilterContext {
SHashObj* colHash;
int32_t index;
SArray* cInfoList;
} STagScanFilterContext;

typedef struct STagScanInfo {
SColumnInfo* pCols;
SSDataBlock* pRes;

@ -259,6 +265,14 @@ typedef struct STagScanInfo {
SLimitNode* pSlimit;
SReadHandle readHandle;
STableListInfo* pTableListInfo;
uint64_t suid;
void* pCtbCursor;
SNode* pTagCond;
SNode* pTagIndexCond;
STagScanFilterContext filterCtx;
SArray* aUidTags; // SArray<STUidTagInfo>
SArray* aFilterIdxs; // SArray<int32_t>
SStorageAPI* pStorageAPI;
} STagScanInfo;

typedef enum EStreamScanMode {

@ -456,7 +470,6 @@ typedef struct SStreamIntervalOperatorInfo {
SArray* pPullWins; // SPullWindowInfo
int32_t pullIndex;
SSDataBlock* pPullDataRes;
bool isFinal;
SArray* pChildren;
int32_t numOfChild;
SStreamState* pState; // void

@ -507,7 +520,6 @@ typedef struct SStreamSessionAggOperatorInfo {
void* pDelIterator;
SArray* pChildren; // cache for children's result; final stream operator
SPhysiNode* pPhyNode; // create new child
bool isFinal;
bool ignoreExpiredData;
bool ignoreExpiredDataSaved;
SArray* pUpdated;

@ -695,6 +707,13 @@ uint64_t calcGroupId(char* pData, int32_t len);
void streamOpReleaseState(struct SOperatorInfo* pOperator);
void streamOpReloadState(struct SOperatorInfo* pOperator);

bool inSlidingWindow(SInterval* pInterval, STimeWindow* pWin, SDataBlockInfo* pBlockInfo);
bool inCalSlidingWindow(SInterval* pInterval, STimeWindow* pWin, TSKEY calStart, TSKEY calEnd, EStreamType blockType);
bool compareVal(const char* v, const SStateKeys* pKey);

int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext, SDataBlockInfo* pDataBlockInfo,
TSKEY* primaryKeys, int32_t prevPosition, int32_t order);

#ifdef __cplusplus
}
#endif
@ -81,7 +81,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,

SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);

SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SNode* pTagCond, SNode*pTagIndexCond, SExecTaskInfo* pTaskInfo);

SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode, const char* pUser, SExecTaskInfo* pTaskInfo);
@ -99,7 +99,7 @@ struct SExecTaskInfo {
void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst);
SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model, SStorageAPI* pAPI);
void doDestroyTask(SExecTaskInfo* pTaskInfo);
bool isTaskKilled(SExecTaskInfo* pTaskInfo);
bool isTaskKilled(void* pTaskInfo);
void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode);
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
int32_t createExecTaskInfo(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
@ -191,7 +191,8 @@ int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols);
bool tsortIsClosed(SSortHandle* pHandle);
void tsortSetClosed(SSortHandle* pHandle);

void setSingleTableMerge(SSortHandle* pHandle);
void tsortSetSingleTableMerge(SSortHandle* pHandle);
void tsortSetAbortCheckFn(SSortHandle* pHandle, bool (*checkFn)(void* param), void* param);

#ifdef __cplusplus
}
@ -58,16 +58,6 @@ static void doKeepTuple(SWindowRowsSup* pRowSup, int64_t ts, uint64_t groupId) {
pRowSup->groupId = groupId;
}

static void updateTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pWin, bool includeEndpoint) {
int64_t* ts = (int64_t*)pColData->pData;
int32_t delta = includeEndpoint ? 1 : 0;

int64_t duration = pWin->ekey - pWin->skey + delta;
ts[2] = duration; // set the duration
ts[3] = pWin->skey; // window start key
ts[4] = pWin->ekey + delta; // window end key
}

SOperatorInfo* createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* physiNode,
SExecTaskInfo* pTaskInfo) {
SEventWindowOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SEventWindowOperatorInfo));

@ -250,7 +240,7 @@ static void doEventWindowAggImpl(SEventWindowOperatorInfo* pInfo, SExprSupp* pSu
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_APP_ERROR);
}

updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false);
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, 0);
applyAggFunctionOnPartialTuples(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startIndex, numOfRows,
pBlock->info.rows, numOfOutput);
}
@@ -47,8 +47,6 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* p

 static int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond,
                             STableListInfo* pListInfo, uint8_t* digest, const char* idstr, SStorageAPI* pStorageAPI);
-static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode,
-                                               SStorageAPI* pStorageAPI);

 static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; }
 static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->offset; }
@@ -846,7 +844,7 @@ static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, S
   return -1;
 }

-static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode,
+SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode,
                                         SStorageAPI* pStorageAPI) {
   SSDataBlock* pResBlock = createDataBlock();
   if (pResBlock == NULL) {
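The hunk above only drops the `static` qualifier from createTagValBlockForFilter, giving it external linkage so other executor source files can call it (its per-file forward declaration was removed in the previous hunk). A standalone sketch of the linkage difference, with illustrative names.

/* Standalone sketch (not TDengine source): `static` gives a function internal
 * linkage (visible only inside its own .c file); removing it exports the
 * symbol so another translation unit, given a declaration in a shared header,
 * can link against it. */
#include <stdio.h>

static int internalHelper(int x) {   /* internal linkage: this file only */
  return x * 2;
}

int exportedHelper(int x) {          /* external linkage: callable from other .c files */
  return internalHelper(x) + 1;      /* once declared in a shared header */
}

int main(void) {
  printf("%d\n", exportedHelper(20));  /* prints 41 */
  return 0;
}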
@@ -1677,6 +1675,7 @@ SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) {
       .intervalUnit = pTableScanNode->intervalUnit,
       .slidingUnit = pTableScanNode->slidingUnit,
       .offset = pTableScanNode->offset,
+      .precision = pTableScanNode->scan.node.pOutputDataBlockDesc->precision,
   };

   return interval;
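The hunk above adds the output block's precision to the extracted SInterval. A standalone sketch of why the precision has to travel with the interval values: the same numeric interval means a different wall-clock span depending on the timestamp unit. The constants below are illustrative, not TDengine's own precision defines.

/* Standalone sketch (not TDengine source): interpreting an interval length
 * requires knowing the timestamp precision of the data it applies to. */
#include <stdint.h>
#include <stdio.h>

enum { PRECISION_MS = 0, PRECISION_US = 1, PRECISION_NS = 2 };  /* illustrative */

static double toSeconds(int64_t ticks, int precision) {
  switch (precision) {
    case PRECISION_MS: return ticks / 1e3;
    case PRECISION_US: return ticks / 1e6;
    case PRECISION_NS: return ticks / 1e9;
  }
  return 0;
}

int main(void) {
  int64_t interval = 10000;  /* "10000" alone is ambiguous without a precision */
  printf("ms: %.3f s\n", toSeconds(interval, PRECISION_MS));   /* 10.000 s   */
  printf("us: %.3f s\n", toSeconds(interval, PRECISION_US));   /* 0.010 s    */
  printf("ns: %.6f s\n", toSeconds(interval, PRECISION_NS));   /* 0.000010 s */
  return 0;
}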
@@ -2178,12 +2177,67 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
   return TSDB_CODE_SUCCESS;
 }

-void printDataBlock(SSDataBlock* pBlock, const char* flag) {
+char* getStreamOpName(uint16_t opType) {
+  switch (opType) {
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
+      return "stream scan";
+    case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
+      return "project";
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL:
+      return "interval single";
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL:
+      return "interval final";
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
+      return "interval semi";
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL:
+      return "stream fill";
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
+      return "session single";
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION:
+      return "session semi";
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION:
+      return "session final";
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
+      return "state single";
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
+      return "stream partitionby";
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT:
+      return "stream event";
+  }
+  return "";
+}
+
+void printDataBlock(SSDataBlock* pBlock, const char* flag, const char* taskIdStr) {
   if (!pBlock || pBlock->info.rows == 0) {
-    qDebug("===stream===%s: Block is Null or Empty", flag);
+    qDebug("%s===stream===%s: Block is Null or Empty", taskIdStr, flag);
     return;
   }
   char* pBuf = NULL;
-  qDebug("%s", dumpBlockData(pBlock, flag, &pBuf));
+  qDebug("%s", dumpBlockData(pBlock, flag, &pBuf, taskIdStr));
   taosMemoryFree(pBuf);
 }
+
+void printSpecDataBlock(SSDataBlock* pBlock, const char* flag, const char* opStr, const char* taskIdStr) {
+  if (!pBlock || pBlock->info.rows == 0) {
+    qDebug("%s===stream===%s: Block is Null or Empty", taskIdStr, flag);
+    return;
+  }
+  if (qDebugFlag & DEBUG_DEBUG) {
+    char* pBuf = NULL;
+    char  flagBuf[64];
+    snprintf(flagBuf, sizeof(flagBuf), "%s %s", flag, opStr);
+    qDebug("%s", dumpBlockData(pBlock, flagBuf, &pBuf, taskIdStr));
+    taosMemoryFree(pBuf);
+  }
+}
+
+TSKEY getStartTsKey(STimeWindow* win, const TSKEY* tsCols) { return tsCols == NULL ? win->skey : tsCols[0]; }
+
+void updateTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pWin, int64_t delta) {
+  int64_t* ts = (int64_t*)pColData->pData;
+
+  int64_t duration = pWin->ekey - pWin->skey + delta;
+  ts[2] = duration;            // set the duration
+  ts[3] = pWin->skey;          // window start key
+  ts[4] = pWin->ekey + delta;  // window end key
+}
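The hunk above introduces getStreamOpName(), reworks printDataBlock() to take a task-id prefix, and adds printSpecDataBlock(); the later hunks switch the stream operators' call sites over to these helpers. A standalone sketch of the resulting log pattern, with qDebug/dumpBlockData mocked by printf; every *Mock name is hypothetical.

/* Standalone sketch (not TDengine source): the operator type is mapped to a
 * readable label and every line carries the task id, so interleaved logs from
 * different stream tasks can be told apart. */
#include <stdio.h>

typedef enum { OP_STREAM_SCAN, OP_STREAM_FILL, OP_STREAM_PARTITION } EMockOpType;

static const char* getStreamOpNameMock(EMockOpType t) {
  switch (t) {
    case OP_STREAM_SCAN:      return "stream scan";
    case OP_STREAM_FILL:      return "stream fill";
    case OP_STREAM_PARTITION: return "stream partitionby";
  }
  return "";
}

static void printSpecBlockMock(int rows, const char* flag, const char* opStr, const char* taskIdStr) {
  if (rows == 0) {
    printf("%s===stream===%s: Block is Null or Empty\n", taskIdStr, flag);
    return;
  }
  printf("%s===stream===%s %s: %d rows\n", taskIdStr, flag, opStr, rows);
}

int main(void) {
  /* e.g. a fill operator receiving an input block for a hypothetical task id */
  printSpecBlockMock(128, getStreamOpNameMock(OP_STREAM_FILL), "recv", "task:0x1234 ");
  printSpecBlockMock(0, getStreamOpNameMock(OP_STREAM_PARTITION), "recv", "task:0x1234 ");
  return 0;
}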
@@ -589,6 +589,10 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo
   int64_t st = taosGetTimestampUs();

   int32_t blockIndex = 0;
+  int32_t rowsThreshold = pTaskInfo->pSubplan->rowsThreshold;
+  if (!pTaskInfo->pSubplan->dynamicRowThreshold || 4096 <= pTaskInfo->pSubplan->rowsThreshold) {
+    rowsThreshold = 4096;
+  }
   while ((pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot)) != NULL) {
     SSDataBlock* p = NULL;
     if (blockIndex >= taosArrayGetSize(pTaskInfo->pResultBlockList)) {
@@ -606,10 +610,13 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo
     ASSERT(p->info.rows > 0);
     taosArrayPush(pResList, &p);

-    if (current >= 4096) {
+    if (current >= rowsThreshold) {
       break;
     }
   }
+  if (pTaskInfo->pSubplan->dynamicRowThreshold) {
+    pTaskInfo->pSubplan->rowsThreshold -= current;
+  }

   *hasMore = (pRes != NULL);
   uint64_t el = (taosGetTimestampUs() - st);
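The two hunks above add a per-subplan row budget to qExecTaskOpt(): each call caps its output at 4096 rows unless a smaller dynamic budget remains, and after the call the budget is reduced by the rows actually returned. A simplified standalone model of that bookkeeping; the real loop accumulates rows block by block, and SSubplanMock/execOnceMock are illustrative only.

/* Standalone sketch (not TDengine source): how the running row budget drains
 * across successive calls. */
#include <stdio.h>

typedef struct {
  int dynamicRowThreshold;  /* non-zero: rowsThreshold is a running budget */
  int rowsThreshold;        /* remaining row budget for this subplan */
} SSubplanMock;

static int execOnceMock(SSubplanMock* plan, int producedRows) {
  int rowsThreshold = plan->rowsThreshold;
  if (!plan->dynamicRowThreshold || 4096 <= plan->rowsThreshold) {
    rowsThreshold = 4096;                       /* fall back to the fixed cap */
  }
  int current = producedRows < rowsThreshold ? producedRows : rowsThreshold;
  if (plan->dynamicRowThreshold) {
    plan->rowsThreshold -= current;             /* shrink the remaining budget */
  }
  return current;
}

int main(void) {
  SSubplanMock plan = {.dynamicRowThreshold = 1, .rowsThreshold = 6000};

  int got1 = execOnceMock(&plan, 5000);
  printf("call 1 returns %d rows, %d left\n", got1, plan.rowsThreshold);  /* 4096, 1904 */

  int got2 = execOnceMock(&plan, 5000);
  printf("call 2 returns %d rows, %d left\n", got2, plan.rowsThreshold);  /* 1904, 0 */
  return 0;
}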
@@ -1070,3 +1070,15 @@ void streamOpReloadState(SOperatorInfo* pOperator) {
     downstream->fpSet.reloadStreamStateFn(downstream);
   }
 }
+
+bool compareVal(const char* v, const SStateKeys* pKey) {
+  if (IS_VAR_DATA_TYPE(pKey->type)) {
+    if (varDataLen(v) != varDataLen(pKey->pData)) {
+      return false;
+    } else {
+      return memcmp(varDataVal(v), varDataVal(pKey->pData), varDataLen(v)) == 0;
+    }
+  } else {
+    return memcmp(pKey->pData, v, pKey->bytes) == 0;
+  }
+}
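The new compareVal() above takes two paths: variable-length values compare length first and then the payload, while fixed-size values compare raw bytes. A standalone sketch assuming the usual length-prefixed varData layout; the mockVar* helpers stand in for varDataLen/varDataVal and are not the real definitions.

/* Standalone sketch (not TDengine source): the two comparison paths, with a
 * 2-byte length prefix assumed for variable-length values. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t mockVarLen(const char* v) {       /* stand-in for varDataLen */
  uint16_t len;
  memcpy(&len, v, sizeof(len));
  return len;
}
static const char* mockVarVal(const char* v) {    /* stand-in for varDataVal */
  return v + sizeof(uint16_t);
}

static bool compareVarMock(const char* a, const char* b) {
  if (mockVarLen(a) != mockVarLen(b)) return false;           /* length first */
  return memcmp(mockVarVal(a), mockVarVal(b), mockVarLen(a)) == 0;
}

static bool compareFixedMock(const void* a, const void* b, size_t bytes) {
  return memcmp(a, b, bytes) == 0;                            /* raw bytes */
}

int main(void) {
  char v1[2 + 3], v2[2 + 3];
  uint16_t len = 3;
  memcpy(v1, &len, 2); memcpy(v1 + 2, "abc", 3);
  memcpy(v2, &len, 2); memcpy(v2 + 2, "abd", 3);
  printf("var equal: %d\n", compareVarMock(v1, v2));            /* 0: payload differs */

  int64_t k1 = 42, k2 = 42;
  printf("fixed equal: %d\n", compareFixedMock(&k1, &k2, sizeof(k1)));  /* 1 */
  return 0;
}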
@@ -1292,14 +1292,14 @@ static SSDataBlock* doStreamFill(SOperatorInfo* pOperator) {
       (pInfo->pFillInfo->pos != FILL_POS_INVALID && pInfo->pFillInfo->needFill == true)) {
     doStreamFillRange(pInfo->pFillInfo, pInfo->pFillSup, pInfo->pRes);
     if (pInfo->pRes->info.rows > 0) {
-      printDataBlock(pInfo->pRes, "stream fill");
+      printDataBlock(pInfo->pRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
       return pInfo->pRes;
     }
   }
   if (pOperator->status == OP_RES_TO_RETURN) {
     doDeleteFillFinalize(pOperator);
     if (pInfo->pRes->info.rows > 0) {
-      printDataBlock(pInfo->pRes, "stream fill");
+      printDataBlock(pInfo->pRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
       return pInfo->pRes;
     }
     setOperatorCompleted(pOperator);
@@ -1317,12 +1317,12 @@ static SSDataBlock* doStreamFill(SOperatorInfo* pOperator) {
       pOperator->status = OP_RES_TO_RETURN;
       pInfo->pFillInfo->preRowKey = INT64_MIN;
       if (pInfo->pRes->info.rows > 0) {
-        printDataBlock(pInfo->pRes, "stream fill");
+        printDataBlock(pInfo->pRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
        return pInfo->pRes;
       }
       break;
     }
-    printDataBlock(pBlock, "stream fill recv");
+    printSpecDataBlock(pBlock, getStreamOpName(pOperator->operatorType), "recv", GET_TASKID(pTaskInfo));

     if (pInfo->pFillInfo->curGroupId != pBlock->info.id.groupId) {
       pInfo->pFillInfo->curGroupId = pBlock->info.id.groupId;
@@ -1339,7 +1339,7 @@ static SSDataBlock* doStreamFill(SOperatorInfo* pOperator) {
       pInfo->pFillSup->hasDelete = true;
       doDeleteFillResult(pOperator);
       if (pInfo->pDelRes->info.rows > 0) {
-        printDataBlock(pInfo->pDelRes, "stream fill delete");
+        printDataBlock(pInfo->pDelRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
        return pInfo->pDelRes;
       }
       continue;
@@ -1378,7 +1378,7 @@ static SSDataBlock* doStreamFill(SOperatorInfo* pOperator) {
   }

   pOperator->resultInfo.totalRows += pInfo->pRes->info.rows;
-  printDataBlock(pInfo->pRes, "stream fill");
+  printDataBlock(pInfo->pRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
   return pInfo->pRes;
 }

@@ -956,7 +956,8 @@ static bool hasRemainPartion(SStreamPartitionOperatorInfo* pInfo) { return pInfo
 static bool hasRemainTbName(SStreamPartitionOperatorInfo* pInfo) { return pInfo->pTbNameIte != NULL; }

 static SSDataBlock* buildStreamPartitionResult(SOperatorInfo* pOperator) {
-  SStorageAPI*   pAPI = &pOperator->pTaskInfo->storageAPI;
+  SStorageAPI*   pAPI = &pOperator->pTaskInfo->storageAPI;
+  SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;

   SStreamPartitionOperatorInfo* pInfo = pOperator->info;
   SSDataBlock*                  pDest = pInfo->binfo.pRes;
@@ -994,7 +995,7 @@ static SSDataBlock* buildStreamPartitionResult(SOperatorInfo* pOperator) {
   pOperator->resultInfo.totalRows += pDest->info.rows;
   pInfo->parIte = taosHashIterate(pInfo->pPartitions, pInfo->parIte);
   ASSERT(pDest->info.rows > 0);
-  printDataBlock(pDest, "stream partitionby");
+  printDataBlock(pDest, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
   return pDest;
 }

@@ -1115,7 +1116,7 @@ static SSDataBlock* doStreamHashPartition(SOperatorInfo* pOperator) {
       setOperatorCompleted(pOperator);
       return NULL;
     }
-    printDataBlock(pBlock, "stream partitionby recv");
+    printSpecDataBlock(pBlock, getStreamOpName(pOperator->operatorType), "recv", GET_TASKID(pTaskInfo));
     switch (pBlock->info.type) {
       case STREAM_NORMAL:
       case STREAM_PULL_DATA:
@@ -1125,7 +1126,7 @@ static SSDataBlock* doStreamHashPartition(SOperatorInfo* pOperator) {
       case STREAM_DELETE_DATA: {
         copyDataBlock(pInfo->pDelRes, pBlock);
         pInfo->pDelRes->info.type = STREAM_DELETE_RESULT;
-        printDataBlock(pInfo->pDelRes, "stream partitionby delete");
+        printDataBlock(pInfo->pDelRes, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
        return pInfo->pDelRes;
       } break;
       default:
Some files were not shown because too many files have changed in this diff.