diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in new file mode 100644 index 0000000000..5a68020dd7 --- /dev/null +++ b/cmake/apr-util_CMakeLists.txt.in @@ -0,0 +1,19 @@ +# apr-util +ExternalProject_Add(aprutil-1 + URL https://dlcdn.apache.org//apr/apr-util-1.6.3.tar.gz + URL_HASH SHA256=2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983 + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/apache/apr-util.git + #GIT_TAG 1.5.4 + SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" + #BINARY_DIR "" + BUILD_IN_SOURCE TRUE + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1 + #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr + BUILD_COMMAND make + INSTALL_COMMAND make install + TEST_COMMAND "" +) diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in new file mode 100644 index 0000000000..18c4eb62a1 --- /dev/null +++ b/cmake/apr_CMakeLists.txt.in @@ -0,0 +1,19 @@ +# apr +ExternalProject_Add(apr-1 + URL https://dlcdn.apache.org//apr/apr-1.7.4.tar.gz + URL_HASH SHA256=a4137dd82a185076fa50ba54232d920a17c6469c30b0876569e1c2a05ff311d9 + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/apache/apr.git + #GIT_TAG 1.5.2 + SOURCE_DIR "${TD_CONTRIB_DIR}/apr" + BUILD_IN_SOURCE TRUE + UPDATE_DISCONNECTED TRUE + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --enable-shared=no + #CONFIGURE_COMMAND ./configure + BUILD_COMMAND make + INSTALL_COMMAND make install + TEST_COMMAND "" +) diff --git a/cmake/cmake.define b/cmake/cmake.define index cf7f450994..edc5dd601a 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -1,5 +1,4 @@ cmake_minimum_required(VERSION 3.0) - set(CMAKE_VERBOSE_MAKEFILE ON) set(TD_BUILD_TAOSA_INTERNAL FALSE) @@ -78,6 +77,12 @@ ELSE () SET(TD_TAOS_TOOLS TRUE) ENDIF () +IF (${TD_WINDOWS}) + SET(TAOS_LIB taos_static) +ELSE () + SET(TAOS_LIB taos) +ENDIF () + IF (TD_WINDOWS) MESSAGE("${Yellow} set compiler flag for Windows! 
${ColourReset}") SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd") diff --git a/cmake/cmake.options b/cmake/cmake.options index fa0b888415..1d4e9ba515 100644 --- a/cmake/cmake.options +++ b/cmake/cmake.options @@ -125,6 +125,16 @@ option( ON ) +IF(${TD_LINUX}) + +option( + BUILD_WITH_COS + "If build with cos" + ON +) + +ENDIF () + option( BUILD_WITH_SQLITE "If build with sqlite" diff --git a/cmake/cos_CMakeLists.txt.in b/cmake/cos_CMakeLists.txt.in new file mode 100644 index 0000000000..ee1e58b50f --- /dev/null +++ b/cmake/cos_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# cos +ExternalProject_Add(cos + GIT_REPOSITORY https://github.com/tencentyun/cos-c-sdk-v5.git + GIT_TAG v5.0.16 + SOURCE_DIR "${TD_CONTRIB_DIR}/cos-c-sdk-v5" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in new file mode 100644 index 0000000000..0fe0c2256f --- /dev/null +++ b/cmake/curl_CMakeLists.txt.in @@ -0,0 +1,17 @@ +# curl +ExternalProject_Add(curl + URL https://curl.se/download/curl-8.2.1.tar.gz + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/curl/curl.git + #GIT_TAG curl-7_88_1 + SOURCE_DIR "${TD_CONTRIB_DIR}/curl" + BUILD_IN_SOURCE TRUE + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli + #CONFIGURE_COMMAND ./configure --without-ssl + BUILD_COMMAND make + INSTALL_COMMAND make install + TEST_COMMAND "" +) diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in new file mode 100644 index 0000000000..9dcb5df665 --- /dev/null +++ b/cmake/mxml_CMakeLists.txt.in @@ -0,0 +1,14 @@ +# cos +ExternalProject_Add(mxml + GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git + GIT_TAG release-2.10 + SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" + #BINARY_DIR "" + BUILD_IN_SOURCE TRUE + #UPDATE_COMMAND "" + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --enable-shared=no + #CONFIGURE_COMMAND ./configure + BUILD_COMMAND make + INSTALL_COMMAND make install + TEST_COMMAND "" +) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index c60fd33b16..e3e48ac3a1 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -6,6 +6,39 @@ function(cat IN_FILE OUT_FILE) file(APPEND ${OUT_FILE} "${CONTENTS}") endfunction(cat IN_FILE OUT_FILE) +if(${TD_LINUX}) + +set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") +configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + +if(${BUILD_WITH_COS}) + file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/) + cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) +endif(${BUILD_WITH_COS}) + +configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") +execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") +execute_process(COMMAND "${CMAKE_COMMAND}" --build . 
+ WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") + +set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2") +configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) + +if(${BUILD_WITH_COS}) + cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) +endif(${BUILD_WITH_COS}) + +configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") +execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") +execute_process(COMMAND "${CMAKE_COMMAND}" --build . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") + +endif(${TD_LINUX}) + set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -122,6 +155,16 @@ if(${BUILD_WITH_SQLITE}) cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif(${BUILD_WITH_SQLITE}) +# cos +if(${BUILD_WITH_COS}) + #cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + add_definitions(-DUSE_COS) +endif(${BUILD_WITH_COS}) + # lucene if(${BUILD_WITH_LUCENE}) cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -347,6 +390,31 @@ if (${BUILD_WITH_ROCKSDB}) endif() endif() +# cos +if(${BUILD_WITH_COS}) + if(${TD_LINUX}) + set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1) + #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) + option(ENABLE_TEST "Enable the tests" OFF) + INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include) + MESSAGE("$ENV{HOME}/.cos-local.1/include") + + set(CMAKE_BUILD_TYPE debug) + set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) + set(CMAKE_PROJECT_NAME cos_c_sdk) + + add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL) + target_include_directories( + cos_c_sdk + PUBLIC $ + ) + + set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME}) + else() + + endif(${TD_LINUX}) +endif(${BUILD_WITH_COS}) + # lucene # To support build on ubuntu: sudo apt-get install libboost-all-dev if(${BUILD_WITH_LUCENE}) diff --git a/contrib/test/CMakeLists.txt b/contrib/test/CMakeLists.txt index f35cf0d13d..1deff5a67e 100644 --- a/contrib/test/CMakeLists.txt +++ b/contrib/test/CMakeLists.txt @@ -3,6 +3,11 @@ if(${BUILD_WITH_ROCKSDB}) add_subdirectory(rocksdb) endif(${BUILD_WITH_ROCKSDB}) +# cos +if(${BUILD_WITH_COS}) + add_subdirectory(cos) +endif(${BUILD_WITH_COS}) + if(${BUILD_WITH_LUCENE}) add_subdirectory(lucene) endif(${BUILD_WITH_LUCENE}) diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt new file mode 100644 index 0000000000..f8804033de --- /dev/null +++ b/contrib/test/cos/CMakeLists.txt @@ -0,0 +1,49 @@ +add_executable(cosTest "") +target_sources(cosTest + PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}/main.c" + ) + +#find_path(APR_INCLUDE_DIR apr-1/apr_time.h) +#find_path(APR_UTIL_INCLUDE_DIR apr/include/apr-1/apr_md5.h) +#find_path(MINIXML_INCLUDE_DIR mxml.h) +#find_path(CURL_INCLUDE_DIR curl/curl.h) + +#include_directories (${MINIXML_INCLUDE_DIR}) +#include_directories (${CURL_INCLUDE_DIR}) +FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) +#FIND_PROGRAM(APU_CONFIG_BIN NAMES apu-config apu-1-config PATHS 
+
+IF (APR_CONFIG_BIN)
+  EXECUTE_PROCESS(
+    COMMAND ${APR_CONFIG_BIN} --includedir
+    OUTPUT_VARIABLE APR_INCLUDE_DIR
+    OUTPUT_STRIP_TRAILING_WHITESPACE
+  )
+ENDIF()
+#IF (APU_CONFIG_BIN)
+#  EXECUTE_PROCESS(
+#    COMMAND ${APU_CONFIG_BIN} --includedir
+#    OUTPUT_VARIABLE APR_UTIL_INCLUDE_DIR
+#    OUTPUT_STRIP_TRAILING_WHITESPACE
+#  )
+#ENDIF()
+
+include_directories (${APR_INCLUDE_DIR})
+#include_directories (${APR_UTIL_INCLUDE_DIR})
+
+target_include_directories(
+  cosTest
+  PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
+  )
+
+#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/)
+#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/)
+#find_library(MINIXML_LIBRARY mxml)
+#find_library(CURL_LIBRARY curl)
+
+target_link_libraries(cosTest cos_c_sdk)
+target_link_libraries(cosTest apr-1)
+target_link_libraries(cosTest aprutil-1)
+target_link_libraries(cosTest mxml)
+target_link_libraries(cosTest curl)
diff --git a/contrib/test/cos/main.c b/contrib/test/cos/main.c
new file mode 100644
index 0000000000..7e5e7c8c8b
--- /dev/null
+++ b/contrib/test/cos/main.c
@@ -0,0 +1,3095 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+
+#include "cos_api.h"
+#include "cos_http_io.h"
+#include "cos_log.h"
+
+// endpoint is the COS access domain; see https://cloud.tencent.com/document/product/436/6224 for details
+// static char TEST_COS_ENDPOINT[] = "cos.ap-guangzhou.myqcloud.com";
+// static char TEST_COS_ENDPOINT[] = "http://oss-cn-beijing.aliyuncs.com";
+static char TEST_COS_ENDPOINT[] = "http://cos.ap-beijing.myqcloud.com";
+// access domain of CI (Cloud Infinite); see https://cloud.tencent.com/document/product/460/31066 for details
+static char TEST_CI_ENDPOINT[] = "https://ci.ap-guangzhou.myqcloud.com";
+// the developer's project SecretId/SecretKey, available at https://console.cloud.tencent.com/cam/capi
+static char *TEST_ACCESS_KEY_ID;      // your secret_id
+static char *TEST_ACCESS_KEY_SECRET;  // your secret_key
+// the developer's unique user-level resource identifier for accessing COS, available at
+// https://console.cloud.tencent.com/cam/capi
+// static char TEST_APPID[] = "";  // your appid
+// static char TEST_APPID[] = "119";  // your appid
+static char TEST_APPID[] = "1309024725";  // your appid
+// the cos bucket name, syntax: [bucket]-[appid], for example: mybucket-1253666666; it can be viewed at
+// https://console.cloud.tencent.com/cos5/bucket
+// static char TEST_BUCKET_NAME[] = "";
+// static char TEST_BUCKET_NAME[] = "";
+// static char TEST_BUCKET_NAME[] = "test-bucket-119";
+static char TEST_BUCKET_NAME[] = "test0711-1309024725";
+// object owner, e.g. user UIN: 100000000001
+static char TEST_UIN[] = "";  // your uin
+// region; for the list of values see https://cloud.tencent.com/document/product/436/6224,
+// e.g. ap-beijing, ap-hongkong, eu-frankfurt
+static char TEST_REGION[] = "ap-guangzhou";  // region in endpoint
+// object key, the unique identifier of an object (Object) within a bucket (Bucket); for more on
+// objects and object keys see https://cloud.tencent.com/document/product/436/13324
+static char TEST_OBJECT_NAME1[] = "1.txt";
+static char TEST_OBJECT_NAME2[] = "test2.dat";
+static char TEST_OBJECT_NAME3[] = "test3.dat";
+static char TEST_OBJECT_NAME4[] = "multipart.txt";
+// static char TEST_DOWNLOAD_NAME2[] = "download_test2.dat";
+static char *TEST_APPEND_NAMES[] = {"test.7z.001", "test.7z.002"};
+static char TEST_DOWNLOAD_NAME3[] = "download_test3.dat";
+static char TEST_MULTIPART_OBJECT[] = "multipart.dat";
+static char TEST_DOWNLOAD_NAME4[] = "multipart_download.dat";
+static char TEST_MULTIPART_FILE[] = "test.zip";
+// static char TEST_MULTIPART_OBJECT2[] = "multipart2.dat";
+static char TEST_MULTIPART_OBJECT3[] = "multipart3.dat";
+static char
TEST_MULTIPART_OBJECT4[] = "multipart4.dat"; + +static void print_headers(cos_table_t *headers) { + const cos_array_header_t *tarr; + const cos_table_entry_t *telts; + int i = 0; + + if (apr_is_empty_table(headers)) { + return; + } + + tarr = cos_table_elts(headers); + telts = (cos_table_entry_t *)tarr->elts; + + printf("headers:\n"); + for (; i < tarr->nelts; i++) { + telts = (cos_table_entry_t *)(tarr->elts + i * tarr->elt_size); + printf("%s: %s\n", telts->key, telts->val); + } +} + +void init_test_config(cos_config_t *config, int is_cname) { + cos_str_set(&config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&config->appid, TEST_APPID); + config->is_cname = is_cname; +} + +void init_test_request_options(cos_request_options_t *options, int is_cname) { + options->config = cos_config_create(options->pool); + init_test_config(options->config, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); +} + +void log_status(cos_status_t *s) { + cos_warn_log("status->code: %d", s->code); + if (s->error_code) cos_warn_log("status->error_code: %s", s->error_code); + if (s->error_msg) cos_warn_log("status->error_msg: %s", s->error_msg); + if (s->req_id) cos_warn_log("status->req_id: %s", s->req_id); +} + +void test_sign() { + cos_pool_t *p = NULL; + const unsigned char secret_key[] = "your secret_key"; + const unsigned char time_str[] = "1480932292;1481012292"; + unsigned char sign_key[40]; + cos_buf_t *fmt_str; + const char *value = NULL; + const char *uri = "/testfile"; + const char *host = "testbucket-125000000.cn-north.myqcloud.com&range=bytes%3d0-3"; + unsigned char fmt_str_hex[40]; + + cos_pool_create(&p, NULL); + fmt_str = cos_create_buf(p, 1024); + + cos_get_hmac_sha1_hexdigest(sign_key, secret_key, sizeof(secret_key) - 1, time_str, sizeof(time_str) - 1); + char *pstr = apr_pstrndup(p, (char *)sign_key, sizeof(sign_key)); + cos_warn_log("sign_key: %s", pstr); + + // method + value = "get"; + cos_buf_append_string(p, fmt_str, value, strlen(value)); + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + // canonicalized resource(URI) + cos_buf_append_string(p, fmt_str, uri, strlen(uri)); + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + // query-parameters + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + // Host + cos_buf_append_string(p, fmt_str, "host=", sizeof("host=") - 1); + cos_buf_append_string(p, fmt_str, host, strlen(host)); + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + char *pstr3 = apr_pstrndup(p, (char *)fmt_str->pos, cos_buf_size(fmt_str)); + cos_warn_log("Format string: %s", pstr3); + + // Format-String sha1hash + cos_get_sha1_hexdigest(fmt_str_hex, (unsigned char *)fmt_str->pos, cos_buf_size(fmt_str)); + + char *pstr2 = apr_pstrndup(p, (char *)fmt_str_hex, sizeof(fmt_str_hex)); + cos_warn_log("Format string sha1hash: %s", pstr2); + + cos_pool_destroy(p); +} + +void test_bucket() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_acl_e cos_acl = COS_ACL_PRIVATE; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // create test bucket + s = cos_create_bucket(options, &bucket, cos_acl, &resp_headers); + log_status(s); + + // list object (get 
bucket)
+  cos_list_object_params_t *list_params = NULL;
+  list_params = cos_create_list_object_params(p);
+  cos_str_set(&list_params->encoding_type, "url");
+  s = cos_list_object(options, &bucket, list_params, &resp_headers);
+  log_status(s);
+  cos_list_object_content_t *content = NULL;
+  char *line = NULL;
+  cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) {
+    line = apr_psprintf(p, "%.*s\t%.*s\t%.*s\n", content->key.len, content->key.data, content->size.len,
+                        content->size.data, content->last_modified.len, content->last_modified.data);
+    printf("%s", line);
+    printf("next marker: %s\n", list_params->next_marker.data);
+  }
+  cos_list_object_common_prefix_t *common_prefix = NULL;
+  cos_list_for_each_entry(cos_list_object_common_prefix_t, common_prefix, &list_params->common_prefix_list, node) {
+    printf("common prefix: %s\n", common_prefix->prefix.data);
+  }
+
+  // delete bucket
+  s = cos_delete_bucket(options, &bucket, &resp_headers);
+  log_status(s);
+
+  cos_pool_destroy(p);
+}
+
+void test_list_objects() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_table_t *resp_headers = NULL;
+
+  // create the memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // list objects
+  cos_list_object_params_t *list_params = NULL;
+  cos_list_object_content_t *content = NULL;
+  list_params = cos_create_list_object_params(p);
+  s = cos_list_object(options, &bucket, list_params, &resp_headers);
+  if (cos_status_is_ok(s)) {
+    printf("list object succeeded\n");
+    cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) {
+      printf("object: %.*s\n", content->key.len, content->key.data);
+    }
+  } else {
+    printf("list object failed\n");
+  }
+
+  // destroy the memory pool
+  cos_pool_destroy(p);
+}
+
+void test_bucket_lifecycle() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_table_t *resp_headers = NULL;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  cos_list_t rule_list;
+  cos_list_init(&rule_list);
+  cos_lifecycle_rule_content_t *rule_content = NULL;
+
+  rule_content = cos_create_lifecycle_rule_content(p);
+  cos_str_set(&rule_content->id, "testrule1");
+  cos_str_set(&rule_content->prefix, "abc/");
+  cos_str_set(&rule_content->status, "Enabled");
+  rule_content->expire.days = 365;
+  cos_list_add_tail(&rule_content->node, &rule_list);
+
+  rule_content = cos_create_lifecycle_rule_content(p);
+  cos_str_set(&rule_content->id, "testrule2");
+  cos_str_set(&rule_content->prefix, "efg/");
+  cos_str_set(&rule_content->status, "Disabled");
+  cos_str_set(&rule_content->transition.storage_class, "Standard_IA");
+  rule_content->transition.days = 999;
+  cos_list_add_tail(&rule_content->node, &rule_list);
+
+  rule_content = cos_create_lifecycle_rule_content(p);
+  cos_str_set(&rule_content->id, "testrule3");
+  cos_str_set(&rule_content->prefix, "xxx/");
+  cos_str_set(&rule_content->status, "Enabled");
+  rule_content->abort.days = 1;
+  cos_list_add_tail(&rule_content->node, &rule_list);
+
+  s = cos_put_bucket_lifecycle(options, &bucket, &rule_list, &resp_headers);
+  log_status(s);
+
+  cos_list_t rule_list_ret;
+  cos_list_init(&rule_list_ret);
+  s = cos_get_bucket_lifecycle(options, &bucket, &rule_list_ret, &resp_headers);
+  log_status(s);
+
+  s = cos_delete_bucket_lifecycle(options, &bucket, &resp_headers);
+  log_status(s);
+
+  cos_pool_destroy(p);
+}
+
+void test_put_object_with_limit() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_string_t file;
+  cos_table_t *resp_headers = NULL;
+  cos_table_t *headers = NULL;
+
+  // create the memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // the traffic limit ranges from 819200 to 838860800, i.e. 100KB/s - 100MB/s;
+  // values outside this range cause a 400 error
+  headers = cos_table_make(p, 1);
+  cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
+
+  // upload the object
+  cos_str_set(&file, "test_file.bin");
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+  s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers);
+  if (cos_status_is_ok(s)) {
+    printf("put object succeeded\n");
+  } else {
+    printf("put object failed\n");
+  }
+
+  // destroy the memory pool
+  cos_pool_destroy(p);
+}
+
+void test_get_object_with_limit() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_string_t file;
+  cos_table_t *resp_headers = NULL;
+  cos_table_t *headers = NULL;
+
+  // create the memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // the traffic limit ranges from 819200 to 838860800, i.e. 100KB/s - 100MB/s;
+  // values outside this range cause a 400 error
+  headers = cos_table_make(p, 1);
+  cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
+
+  // download the object
+  cos_str_set(&file, "test_file.bin");
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+  s = cos_get_object_to_file(options, &bucket, &object, headers, NULL, &file, &resp_headers);
+  if (cos_status_is_ok(s)) {
+    printf("get object succeeded\n");
+  } else {
+    printf("get object failed\n");
+  }
+
+  // destroy the memory pool
+  cos_pool_destroy(p);
+}
+
+void test_gen_object_url() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+
+  printf("url:%s\n", cos_gen_object_url(options, &bucket, &object));
+
+  cos_pool_destroy(p);
+}
+
+void test_create_dir() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers;
+  cos_table_t *headers = NULL;
+  cos_list_t buffer;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, "folder/");
+
+  // create the folder by uploading an empty object
+  cos_list_init(&buffer);
+  s = cos_put_object_from_buffer(options, &bucket, &object, &buffer, headers, &resp_headers);
+  if (cos_status_is_ok(s)) {
+    printf("put object succeeded\n");
+  } else {
+    printf("put object failed\n");
+  }
+  cos_pool_destroy(p);
+}
+
+void test_object() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers;
+  cos_table_t *headers = NULL;
+  cos_list_t buffer;
+  cos_buf_t *content = NULL;
+  char *str = "This is my test data.";
+  cos_string_t file;
+  int traffic_limit = 0;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+
+  cos_list_init(&buffer);
+  content = cos_buf_pack(options->pool, str, strlen(str));
+  cos_list_add_tail(&content->node, &buffer);
+  s = cos_put_object_from_buffer(options, &bucket, &object, &buffer, headers, &resp_headers);
+  log_status(s);
+
+  cos_list_t download_buffer;
+  cos_list_init(&download_buffer);
+  if (traffic_limit) {
+    // the traffic limit ranges from 819200 to 838860800, i.e. 100KB/s - 100MB/s;
+    // values outside this range cause a 400 error
+    headers = cos_table_make(p, 1);
+    cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
+  }
+  s = cos_get_object_to_buffer(options, &bucket, &object, headers, NULL, &download_buffer, &resp_headers);
+  log_status(s);
+  print_headers(resp_headers);
+  int64_t len = 0;
+  int64_t size = 0;
+  int64_t pos = 0;
+  cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { len += cos_buf_size(content); }
+  char *buf = cos_pcalloc(p, (apr_size_t)(len + 1));
+  buf[len] = '\0';
+  cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) {
+    size = cos_buf_size(content);
+    memcpy(buf + pos, content->pos, (size_t)size);
+    pos += size;
+  }
+  cos_warn_log("Download data=%s", buf);
+
+  cos_str_set(&file, TEST_OBJECT_NAME4);
+  cos_str_set(&object, TEST_OBJECT_NAME4);
+  s = cos_put_object_from_file(options, &bucket, &object, &file, NULL, &resp_headers);
+  log_status(s);
+
+  cos_str_set(&file, TEST_DOWNLOAD_NAME3);
+  cos_str_set(&object, TEST_OBJECT_NAME3);
+  s = cos_get_object_to_file(options, &bucket, &object, NULL, NULL, &file, &resp_headers);
+  log_status(s);
+
+  cos_str_set(&object, TEST_OBJECT_NAME2);
+  s = cos_head_object(options, &bucket, &object, NULL, &resp_headers);
+  log_status(s);
+
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+  s = cos_delete_object(options, &bucket, &object, &resp_headers);
+  log_status(s);
+
+  cos_str_set(&object, TEST_OBJECT_NAME3);
+  s = cos_delete_object(options, &bucket, &object, &resp_headers);
+  log_status(s);
+
+  cos_pool_destroy(p);
+}
+
+void test_append_object() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_string_t file;
+  cos_table_t *resp_headers = NULL;
+
+  // create the memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // append-upload the object
+  cos_str_set(&object, TEST_OBJECT_NAME3);
+  int32_t count = sizeof(TEST_APPEND_NAMES) / sizeof(char *);
+  int32_t index = 0;
+  int64_t position = 0;
+  s = cos_head_object(options, &bucket, &object, NULL, &resp_headers);
+  if (s->code == 200) {
+    char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH);
+    if (content_length_str != NULL) {
+      position = atol(content_length_str);
+    }
+  }
+  for (; index < count; index++) {
+    cos_str_set(&file, TEST_APPEND_NAMES[index]);
+    s = cos_append_object_from_file(options, &bucket, &object, position, &file, NULL, &resp_headers);
+    log_status(s);
+
+    s = cos_head_object(options, &bucket, &object, NULL, &resp_headers);
+    if (s->code == 200) {
+      char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH);
+      if (content_length_str != NULL) {
+        position = atol(content_length_str);
+      }
+    }
+  }
+
+  // destroy the memory pool
+  cos_pool_destroy(p);
+}
+
+void test_head_object() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers = NULL;
+
+  // create the memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // fetch the object metadata
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+  s = cos_head_object(options, &bucket, &object, NULL, &resp_headers);
+  print_headers(resp_headers);
+  if (cos_status_is_ok(s)) {
+    long size = 0;
+    char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH);
+    if (content_length_str != NULL) {
+      size = atol(content_length_str);
+    }
+    printf("head object succeeded: %ld\n", size);
+  } else {
+    printf("head object failed\n");
+  }
+
+  // destroy the memory pool
+  cos_pool_destroy(p);
+}
+
+void test_check_object_exist() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers;
+  cos_table_t *headers = NULL;
+  cos_object_exist_status_e object_exist;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+
+  // check whether the object exists
+  s = cos_check_object_exist(options, &bucket, &object, headers, &object_exist, &resp_headers);
+  if (object_exist == COS_OBJECT_NON_EXIST) {
+    printf("object: %.*s non exist.\n", object.len, object.data);
+  } else if (object_exist == COS_OBJECT_EXIST) {
+    printf("object: %.*s exist.\n", object.len, object.data);
+  } else {
+    printf("object: %.*s unknown status.\n", object.len, object.data);
+    log_status(s);
+  }
+
+  cos_pool_destroy(p);
+}
+
+void test_object_restore() {
+  cos_pool_t *p = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  int is_cname = 0;
+  cos_table_t *resp_headers = NULL;
+  cos_request_options_t *options = NULL;
+  cos_status_t *s = NULL;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, "test_restore.dat");
+
+  cos_object_restore_params_t *restore_params = cos_create_object_restore_params(p);
+  restore_params->days = 30;
+  cos_str_set(&restore_params->tier, "Standard");
+  s = cos_post_object_restore(options, &bucket, &object, restore_params, NULL, NULL, &resp_headers);
+  log_status(s);
+
+  cos_pool_destroy(p);
+}
+
+void progress_callback(int64_t consumed_bytes, int64_t total_bytes) {
+  printf("consumed_bytes = %" APR_INT64_T_FMT ", total_bytes = %" APR_INT64_T_FMT "\n", consumed_bytes, total_bytes);
+}
+
+void test_put_object_from_file() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers;
+  cos_string_t file;
+  int traffic_limit = 0;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_table_t *headers = NULL;
+  if (traffic_limit) {
+    // the traffic limit ranges from 819200 to 838860800, i.e. 100KB/s - 100MB/s;
+    // values outside this range cause a 400 error
+    headers = cos_table_make(p, 1);
+    cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
+  }
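+  // clarifying note, not part of the original sample: per the COS documentation the
+  // x-cos-traffic-limit value is expressed in bit/s, which is why 819200 corresponds
+  // to the 100KB/s lower bound quoted in the range comments above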
cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&file, TEST_OBJECT_NAME4); + cos_str_set(&object, TEST_OBJECT_NAME4); + s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_put_object_from_file_with_sse() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_string_t file; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_table_t *headers = NULL; + headers = cos_table_make(p, 3); + // apr_table_add(headers, "x-cos-server-side-encryption", "AES256"); + apr_table_add(headers, "x-cos-server-side-encryption-customer-algorithm", "AES256"); + apr_table_add(headers, "x-cos-server-side-encryption-customer-key", "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY="); + apr_table_add(headers, "x-cos-server-side-encryption-customer-key-MD5", "U5L61r7jcwdNvT7frmUG8g=="); + + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&file, "/home/jojoliang/data/test.jpg"); + cos_str_set(&object, "pic"); + + s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers); + log_status(s); + { + int i = 0; + apr_array_header_t *pp = (apr_array_header_t *)apr_table_elts(resp_headers); + for (; i < pp->nelts; i++) { + apr_table_entry_t *ele = (apr_table_entry_t *)pp->elts + i; + printf("%s: %s\n", ele->key, ele->val); + } + } + + cos_pool_destroy(p); +} + +void test_get_object_to_file_with_sse() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_string_t file; + cos_table_t *headers = NULL; + cos_table_t *params = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 3); + /* + apr_table_add(headers, "x-cos-server-side-encryption", "AES256"); + */ + /* + apr_table_add(headers, "x-cos-server-side-encryption-customer-algorithm", "AES256"); + apr_table_add(headers, "x-cos-server-side-encryption-customer-key", + "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY="); apr_table_add(headers, + "x-cos-server-side-encryption-customer-key-MD5", "U5L61r7jcwdNvT7frmUG8g=="); + */ + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&file, "getfile"); + cos_str_set(&object, TEST_OBJECT_NAME1); + + s = cos_get_object_to_file(options, &bucket, &object, headers, params, &file, &resp_headers); + log_status(s); + + { + int i = 0; + apr_array_header_t *pp = (apr_array_header_t *)apr_table_elts(resp_headers); + for (; i < pp->nelts; i++) { + apr_table_entry_t *ele = (apr_table_entry_t *)pp->elts + i; + printf("%s: %s\n", ele->key, ele->val); + } + } + + cos_pool_destroy(p); +} + +void multipart_upload_file_from_file() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *complete_headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_string_t upload_id; + cos_upload_file_t *upload_file = NULL; + cos_status_t *s = NULL; + cos_list_upload_part_params_t *params = NULL; + cos_list_t complete_part_list; + cos_list_part_content_t *part_content = NULL; + cos_complete_part_content_t *complete_part_content = NULL; + int part_num = 1; + int64_t pos 
= 0; + int64_t file_length = 0; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 1); + complete_headers = cos_table_make(p, 1); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT); + + // init mulitipart + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, headers, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Init multipart upload succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Init multipart upload failed\n"); + cos_pool_destroy(p); + return; + } + + // upload part from file + int res = COSE_OK; + cos_file_buf_t *fb = cos_create_file_buf(p); + res = cos_open_file_for_all_read(p, TEST_MULTIPART_FILE, fb); + if (res != COSE_OK) { + cos_error_log("Open read file fail, filename:%s\n", TEST_MULTIPART_FILE); + return; + } + file_length = fb->file_last; + apr_file_close(fb->file); + while (pos < file_length) { + upload_file = cos_create_upload_file(p); + cos_str_set(&upload_file->filename, TEST_MULTIPART_FILE); + upload_file->file_pos = pos; + pos += 2 * 1024 * 1024; + upload_file->file_last = pos < file_length ? pos : file_length; // 2MB + s = cos_upload_part_from_file(options, &bucket, &object, &upload_id, part_num++, upload_file, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Multipart upload part from file succeeded\n"); + } else { + printf("Multipart upload part from file failed\n"); + } + } + + // list part + params = cos_create_list_upload_part_params(p); + params->max_ret = 1000; + cos_list_init(&complete_part_list); + s = cos_list_upload_part(options, &bucket, &object, &upload_id, params, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("List multipart succeeded\n"); + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n", part_content->part_number.data, + part_content->size.data, part_content->last_modified.data, part_content->etag.data); + } + } else { + printf("List multipart failed\n"); + cos_pool_destroy(p); + return; + } + + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + complete_part_content = cos_create_complete_part_content(p); + cos_str_set(&complete_part_content->part_number, part_content->part_number.data); + cos_str_set(&complete_part_content->etag, part_content->etag.data); + cos_list_add_tail(&complete_part_content->node, &complete_part_list); + } + + // complete multipart + s = cos_complete_multipart_upload(options, &bucket, &object, &upload_id, &complete_part_list, complete_headers, + &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Complete multipart upload from file succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Complete multipart upload from file failed\n"); + } + + cos_pool_destroy(p); +} + +void multipart_upload_file_from_buffer() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *complete_headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_string_t upload_id; + cos_status_t *s = NULL; + cos_list_t complete_part_list; + cos_complete_part_content_t *complete_part_content = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 
1);
+  complete_headers = cos_table_make(p, 1);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, TEST_MULTIPART_OBJECT);
+
+  // init multipart
+  s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, headers, &resp_headers);
+
+  if (cos_status_is_ok(s)) {
+    printf("Init multipart upload succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data);
+  } else {
+    printf("Init multipart upload failed\n");
+    cos_pool_destroy(p);
+    return;
+  }
+
+  // upload part from buffer
+  char *str = "This is my test data....";
+  cos_list_t buffer;
+  cos_buf_t *content;
+
+  // upload a single part
+  cos_list_init(&buffer);
+  content = cos_buf_pack(p, str, strlen(str));
+  cos_list_add_tail(&content->node, &buffer);
+  s = cos_upload_part_from_buffer(options, &bucket, &object, &upload_id, 1, &buffer, &resp_headers);
+
+  // read the etag directly from the response headers
+  char *etag = apr_pstrdup(p, (char *)apr_table_get(resp_headers, "ETag"));
+  cos_list_init(&complete_part_list);
+  complete_part_content = cos_create_complete_part_content(p);
+  cos_str_set(&complete_part_content->part_number, "1");
+  cos_str_set(&complete_part_content->etag, etag);
+  cos_list_add_tail(&complete_part_content->node, &complete_part_list);
+
+  // alternatively, the etag can be obtained via list parts
+  /*
+  //list part
+  params = cos_create_list_upload_part_params(p);
+  params->max_ret = 1000;
+  cos_list_init(&complete_part_list);
+  s = cos_list_upload_part(options, &bucket, &object, &upload_id,
+                           params, &resp_headers);
+
+  if (cos_status_is_ok(s)) {
+    printf("List multipart succeeded\n");
+    cos_list_for_each_entry(cos_list_part_content_t, part_content, &params->part_list, node) {
+      printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n",
+             part_content->part_number.data,
+             part_content->size.data,
+             part_content->last_modified.data,
+             part_content->etag.data);
+    }
+  } else {
+    printf("List multipart failed\n");
+    cos_pool_destroy(p);
+    return;
+  }
+
+  cos_list_for_each_entry(cos_list_part_content_t, part_content, &params->part_list, node) {
+    complete_part_content = cos_create_complete_part_content(p);
+    cos_str_set(&complete_part_content->part_number, part_content->part_number.data);
+    cos_str_set(&complete_part_content->etag, part_content->etag.data);
+    cos_list_add_tail(&complete_part_content->node, &complete_part_list);
+  }
+  */
+
+  // complete multipart
+  s = cos_complete_multipart_upload(options, &bucket, &object, &upload_id, &complete_part_list, complete_headers,
+                                    &resp_headers);
+
+  if (cos_status_is_ok(s)) {
+    printf("Complete multipart upload from buffer succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data);
+  } else {
+    printf("Complete multipart upload from buffer failed\n");
+  }
+
+  cos_pool_destroy(p);
+}
+
+void abort_multipart_upload() {
+  cos_pool_t *p = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  int is_cname = 0;
+  cos_table_t *headers = NULL;
+  cos_table_t *resp_headers = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t upload_id;
+  cos_status_t *s = NULL;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  headers = cos_table_make(p, 1);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, TEST_MULTIPART_OBJECT);
+
+  s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, headers, &resp_headers);
+
+  if (cos_status_is_ok(s)) {
+    printf("Init multipart upload succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data);
+  } else {
+    printf("Init multipart upload failed\n");
+    cos_pool_destroy(p);
+    return;
+  }
+
+  s =
cos_abort_multipart_upload(options, &bucket, &object, &upload_id, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Abort multipart upload succeeded, upload_id::%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Abort multipart upload failed\n"); + } + + cos_pool_destroy(p); +} + +void list_multipart() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_status_t *s = NULL; + cos_list_multipart_upload_params_t *list_multipart_params = NULL; + cos_list_upload_part_params_t *list_upload_param = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + list_multipart_params = cos_create_list_multipart_upload_params(p); + list_multipart_params->max_ret = 999; + s = cos_list_multipart_upload(options, &bucket, list_multipart_params, &resp_headers); + log_status(s); + + list_upload_param = cos_create_list_upload_part_params(p); + list_upload_param->max_ret = 1000; + cos_string_t upload_id; + cos_str_set(&upload_id, "149373379126aee264fecbf5fe8ddb8b9cd23b76c73ab1af0bcfd50683cc4254f81ebe2386"); + cos_str_set(&object, TEST_MULTIPART_OBJECT); + s = cos_list_upload_part(options, &bucket, &object, &upload_id, list_upload_param, &resp_headers); + if (cos_status_is_ok(s)) { + printf("List upload part succeeded, upload_id::%.*s\n", upload_id.len, upload_id.data); + cos_list_part_content_t *part_content = NULL; + cos_list_for_each_entry(cos_list_part_content_t, part_content, &list_upload_param->part_list, node) { + printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n", part_content->part_number.data, + part_content->size.data, part_content->last_modified.data, part_content->etag.data); + } + } else { + printf("List upload part failed\n"); + } + + cos_pool_destroy(p); +} + +void test_resumable() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t filepath; + cos_resumable_clt_params_t *clt_params; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT4); + cos_str_set(&filepath, TEST_DOWNLOAD_NAME4); + + clt_params = cos_create_resumable_clt_params_content(p, 5 * 1024 * 1024, 3, COS_FALSE, NULL); + s = cos_resumable_download_file(options, &bucket, &object, &filepath, NULL, NULL, clt_params, NULL); + log_status(s); + + cos_pool_destroy(p); +} + +void test_resumable_upload_with_multi_threads() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t filename; + cos_status_t *s = NULL; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_resumable_clt_params_t *clt_params; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT4); + cos_str_set(&filename, TEST_MULTIPART_FILE); + + // upload + clt_params = cos_create_resumable_clt_params_content(p, 1024 * 1024, 8, COS_FALSE, NULL); + s = cos_resumable_upload_file(options, &bucket, &object, &filename, headers, NULL, clt_params, 
NULL, &resp_headers, NULL);
+
+  if (cos_status_is_ok(s)) {
+    printf("upload succeeded\n");
+  } else {
+    printf("upload failed\n");
+  }
+
+  cos_pool_destroy(p);
+}
+
+void test_delete_objects() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_string_t bucket;
+  cos_status_t *s = NULL;
+  cos_table_t *resp_headers = NULL;
+  cos_request_options_t *options = NULL;
+  char *object_name1 = TEST_OBJECT_NAME2;
+  char *object_name2 = TEST_OBJECT_NAME3;
+  cos_object_key_t *content1 = NULL;
+  cos_object_key_t *content2 = NULL;
+  cos_list_t object_list;
+  cos_list_t deleted_object_list;
+  int is_quiet = COS_TRUE;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  cos_list_init(&object_list);
+  cos_list_init(&deleted_object_list);
+  content1 = cos_create_cos_object_key(p);
+  cos_str_set(&content1->key, object_name1);
+  cos_list_add_tail(&content1->node, &object_list);
+  content2 = cos_create_cos_object_key(p);
+  cos_str_set(&content2->key, object_name2);
+  cos_list_add_tail(&content2->node, &object_list);
+
+  s = cos_delete_objects(options, &bucket, &object_list, is_quiet, &resp_headers, &deleted_object_list);
+  log_status(s);
+
+  // check the status before destroying the pool: s is allocated from p
+  if (cos_status_is_ok(s)) {
+    printf("delete objects succeeded\n");
+  } else {
+    printf("delete objects failed\n");
+  }
+
+  cos_pool_destroy(p);
+}
+
+void test_delete_objects_by_prefix() {
+  cos_pool_t *p = NULL;
+  cos_request_options_t *options = NULL;
+  int is_cname = 0;
+  cos_string_t bucket;
+  cos_status_t *s = NULL;
+  cos_string_t prefix;
+  char *prefix_str = "";
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&prefix, prefix_str);
+
+  s = cos_delete_objects_by_prefix(options, &bucket, &prefix);
+  log_status(s);
+  cos_pool_destroy(p);
+
+  printf("test_delete_objects_by_prefix ok\n");
+}
+
+void test_acl() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_acl_e cos_acl = COS_ACL_PRIVATE;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers = NULL;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, "test.txt");
+
+  // put acl
+  cos_string_t read;
+  cos_str_set(&read, "id=\"qcs::cam::uin/12345:uin/12345\", id=\"qcs::cam::uin/45678:uin/45678\"");
+  s = cos_put_bucket_acl(options, &bucket, cos_acl, &read, NULL, NULL, &resp_headers);
+  log_status(s);
+
+  // get acl
+  cos_acl_params_t *acl_params = NULL;
+  acl_params = cos_create_acl_params(p);
+  s = cos_get_bucket_acl(options, &bucket, acl_params, &resp_headers);
+  log_status(s);
+  printf("acl owner id:%s, name:%s\n", acl_params->owner_id.data, acl_params->owner_name.data);
+  cos_acl_grantee_content_t *acl_content = NULL;
+  cos_list_for_each_entry(cos_acl_grantee_content_t, acl_content, &acl_params->grantee_list, node) {
+    printf("acl grantee type:%s, id:%s, name:%s, permission:%s\n", acl_content->type.data, acl_content->id.data,
+           acl_content->name.data, acl_content->permission.data);
+  }
+
+  // put acl
+  s = cos_put_object_acl(options, &bucket, &object, cos_acl, &read, NULL, NULL, &resp_headers);
+  log_status(s);
+
+  // get acl
+  cos_acl_params_t *acl_params2 = NULL;
+  acl_params2 = cos_create_acl_params(p);
+  s = cos_get_object_acl(options, &bucket, &object, acl_params2, &resp_headers);
+  log_status(s);
+  printf("acl owner id:%s, name:%s\n", acl_params2->owner_id.data, acl_params2->owner_name.data);
+  acl_content = NULL;
+  cos_list_for_each_entry(cos_acl_grantee_content_t, acl_content, &acl_params2->grantee_list, node) {
+    printf("acl grantee id:%s, name:%s, permission:%s\n", acl_content->id.data, acl_content->name.data,
+           acl_content->permission.data);
+  }
+
+  cos_pool_destroy(p);
+}
+
+void test_copy() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_string_t src_bucket;
+  cos_string_t src_object;
+  cos_string_t src_endpoint;
+  cos_table_t *resp_headers = NULL;
+
+  // create the memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // set up the copy source and destination
+  cos_str_set(&object, TEST_OBJECT_NAME2);
+  cos_str_set(&src_bucket, TEST_BUCKET_NAME);
+  cos_str_set(&src_endpoint, TEST_COS_ENDPOINT);
+  cos_str_set(&src_object, TEST_OBJECT_NAME1);
+
+  cos_copy_object_params_t *params = NULL;
+  params = cos_create_copy_object_params(p);
+  s = cos_copy_object(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object, NULL, params, &resp_headers);
+  if (cos_status_is_ok(s)) {
+    printf("put object copy succeeded\n");
+  } else {
+    printf("put object copy failed\n");
+  }
+
+  // destroy the memory pool
+  cos_pool_destroy(p);
+}
+
+void test_modify_storage_class() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_string_t src_bucket;
+  cos_string_t src_object;
+  cos_string_t src_endpoint;
+  cos_table_t *resp_headers = NULL;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, TEST_OBJECT_NAME1);
+  cos_str_set(&src_bucket, TEST_BUCKET_NAME);
+  cos_str_set(&src_endpoint, TEST_COS_ENDPOINT);
+  cos_str_set(&src_object, TEST_OBJECT_NAME1);
+
+  // set the x-cos-metadata-directive and x-cos-storage-class headers
+  // (replace with the storage class you want to switch to)
+  cos_table_t *headers = cos_table_make(p, 2);
+  apr_table_add(headers, "x-cos-metadata-directive", "Replaced");
+  // storage classes include INTELLIGENT_TIERING, MAZ_INTELLIGENT_TIERING, STANDARD_IA, ARCHIVE, DEEP_ARCHIVE
+  apr_table_add(headers, "x-cos-storage-class", "ARCHIVE");
+
+  cos_copy_object_params_t *params = NULL;
+  params = cos_create_copy_object_params(p);
+  s = cos_copy_object(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object, headers, params,
+                      &resp_headers);
+  log_status(s);
+
+  cos_pool_destroy(p);
+}
+
+void test_copy_mt() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_string_t src_bucket;
+  cos_string_t src_object;
+  cos_string_t src_endpoint;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+  cos_str_set(&object, "test_copy.txt");
+  cos_str_set(&src_bucket, "mybucket-1253685564");
+  cos_str_set(&src_endpoint, "cn-south.myqcloud.com");
+  cos_str_set(&src_object, "test.txt");
+
+  s = cos_upload_object_by_part_copy_mt(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object, 1024 * 1024,
+                                        8, NULL);
+  log_status(s);
+
+  cos_pool_destroy(p);
+}
+
+void
test_copy_with_part_copy() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t copy_source; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test_copy.txt"); + cos_str_set(©_source, "mybucket-1253685564.cn-south.myqcloud.com/test.txt"); + + s = cos_upload_object_by_part_copy(options, ©_source, &bucket, &object, 1024 * 1024); + log_status(s); + + cos_pool_destroy(p); +} + +void make_rand_string(cos_pool_t *p, int len, cos_string_t *data) { + char *str = NULL; + int i = 0; + str = (char *)cos_palloc(p, len + 1); + for (; i < len; i++) { + str[i] = 'a' + rand() % 32; + } + str[len] = '\0'; + cos_str_set(data, str); +} + +unsigned long get_file_size(const char *file_path) { + unsigned long filesize = -1; + struct stat statbuff; + + if (stat(file_path, &statbuff) < 0) { + return filesize; + } else { + filesize = statbuff.st_size; + } + + return filesize; +} + +void test_part_copy() { + cos_pool_t *p = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + int is_cname = 0; + cos_string_t upload_id; + cos_list_upload_part_params_t *list_upload_part_params = NULL; + cos_upload_part_copy_params_t *upload_part_copy_params1 = NULL; + cos_upload_part_copy_params_t *upload_part_copy_params2 = NULL; + cos_table_t *headers = NULL; + cos_table_t *query_params = NULL; + cos_table_t *resp_headers = NULL; + cos_table_t *list_part_resp_headers = NULL; + cos_list_t complete_part_list; + cos_list_part_content_t *part_content = NULL; + cos_complete_part_content_t *complete_content = NULL; + cos_table_t *complete_resp_headers = NULL; + cos_status_t *s = NULL; + int part1 = 1; + int part2 = 2; + char *local_filename = "test_upload_part_copy.file"; + char *download_filename = "test_upload_part_copy.file.download"; + char *source_object_name = "cos_test_upload_part_copy_source_object"; + char *dest_object_name = "cos_test_upload_part_copy_dest_object"; + FILE *fd = NULL; + cos_string_t download_file; + cos_string_t dest_bucket; + cos_string_t dest_object; + int64_t range_start1 = 0; + int64_t range_end1 = 6000000; + int64_t range_start2 = 6000001; + int64_t range_end2; + cos_string_t data; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + + // create multipart upload local file + make_rand_string(p, 10 * 1024 * 1024, &data); + fd = fopen(local_filename, "w"); + fwrite(data.data, sizeof(data.data[0]), data.len, fd); + fclose(fd); + + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, source_object_name); + cos_str_set(&file, local_filename); + s = cos_put_object_from_file(options, &bucket, &object, &file, NULL, &resp_headers); + log_status(s); + + // init mulitipart + cos_str_set(&object, dest_object_name); + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, NULL, &resp_headers); + log_status(s); + + // upload part copy 1 + upload_part_copy_params1 = cos_create_upload_part_copy_params(p); + cos_str_set(&upload_part_copy_params1->copy_source, + "bucket-appid.cn-south.myqcloud.com/cos_test_upload_part_copy_source_object"); + cos_str_set(&upload_part_copy_params1->dest_bucket, TEST_BUCKET_NAME); + cos_str_set(&upload_part_copy_params1->dest_object, dest_object_name); + 
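+  // note (added comment, not in the original sample): "bucket-appid.cn-south.myqcloud.com/..."
+  // above is a placeholder copy source of the form <bucket>-<appid>.<region>.myqcloud.com/<key>;
+  // point it at a real source object before running this test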
+  cos_str_set(&upload_part_copy_params1->upload_id, upload_id.data);
+  upload_part_copy_params1->part_num = part1;
+  upload_part_copy_params1->range_start = range_start1;
+  upload_part_copy_params1->range_end = range_end1;
+  headers = cos_table_make(p, 0);
+  s = cos_upload_part_copy(options, upload_part_copy_params1, headers, &resp_headers);
+  log_status(s);
+  printf("last modified:%s, etag:%s\n", upload_part_copy_params1->rsp_content->last_modify.data,
+         upload_part_copy_params1->rsp_content->etag.data);
+
+  // upload part copy 2
+  resp_headers = NULL;
+  range_end2 = get_file_size(local_filename) - 1;
+  upload_part_copy_params2 = cos_create_upload_part_copy_params(p);
+  cos_str_set(&upload_part_copy_params2->copy_source,
+              "bucket-appid.cn-south.myqcloud.com/cos_test_upload_part_copy_source_object");
+  cos_str_set(&upload_part_copy_params2->dest_bucket, TEST_BUCKET_NAME);
+  cos_str_set(&upload_part_copy_params2->dest_object, dest_object_name);
+  cos_str_set(&upload_part_copy_params2->upload_id, upload_id.data);
+  upload_part_copy_params2->part_num = part2;
+  upload_part_copy_params2->range_start = range_start2;
+  upload_part_copy_params2->range_end = range_end2;
+  headers = cos_table_make(p, 0);
+  s = cos_upload_part_copy(options, upload_part_copy_params2, headers, &resp_headers);
+  log_status(s);
+  printf("last modified:%s, etag:%s\n", upload_part_copy_params2->rsp_content->last_modify.data,
+         upload_part_copy_params2->rsp_content->etag.data);
+
+  // list part
+  list_upload_part_params = cos_create_list_upload_part_params(p);
+  list_upload_part_params->max_ret = 10;
+  cos_list_init(&complete_part_list);
+
+  cos_str_set(&dest_bucket, TEST_BUCKET_NAME);
+  cos_str_set(&dest_object, dest_object_name);
+  s = cos_list_upload_part(options, &dest_bucket, &dest_object, &upload_id, list_upload_part_params,
+                           &list_part_resp_headers);
+  log_status(s);
+  cos_list_for_each_entry(cos_list_part_content_t, part_content, &list_upload_part_params->part_list, node) {
+    complete_content = cos_create_complete_part_content(p);
+    cos_str_set(&complete_content->part_number, part_content->part_number.data);
+    cos_str_set(&complete_content->etag, part_content->etag.data);
+    cos_list_add_tail(&complete_content->node, &complete_part_list);
+  }
+
+  // complete multipart
+  headers = cos_table_make(p, 0);
+  s = cos_complete_multipart_upload(options, &dest_bucket, &dest_object, &upload_id, &complete_part_list, headers,
+                                    &complete_resp_headers);
+  log_status(s);
+
+  // check that the uploaded copy-part content equals the local file
+  headers = cos_table_make(p, 0);
+  cos_str_set(&download_file, download_filename);
+  s = cos_get_object_to_file(options, &dest_bucket, &dest_object, headers, query_params, &download_file, &resp_headers);
+  log_status(s);
+  printf("local file len = %" APR_INT64_T_FMT ", download file len = %" APR_INT64_T_FMT, get_file_size(local_filename),
+         get_file_size(download_filename));
+  remove(download_filename);
+  remove(local_filename);
+  cos_pool_destroy(p);
+
+  printf("test part copy ok\n");
+}
+
+void test_cors() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_table_t *resp_headers = NULL;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  cos_list_t rule_list;
+  cos_list_init(&rule_list);
+  cos_cors_rule_content_t *rule_content = NULL;
+
+  rule_content = cos_create_cors_rule_content(p);
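+  // each rule below sets an id, an allowed origin/method/header, an exposed header
+  // and a max-age, and is queued onto rule_list for cos_put_bucket_cors()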
+    cos_str_set(&rule_content->id, "testrule1");
+    cos_str_set(&rule_content->allowed_origin, "http://www.qq1.com");
+    cos_str_set(&rule_content->allowed_method, "GET");
+    cos_str_set(&rule_content->allowed_header, "*");
+    cos_str_set(&rule_content->expose_header, "xxx");
+    rule_content->max_age_seconds = 3600;
+    cos_list_add_tail(&rule_content->node, &rule_list);
+
+    rule_content = cos_create_cors_rule_content(p);
+    cos_str_set(&rule_content->id, "testrule2");
+    cos_str_set(&rule_content->allowed_origin, "http://www.qq2.com");
+    cos_str_set(&rule_content->allowed_method, "GET");
+    cos_str_set(&rule_content->allowed_header, "*");
+    cos_str_set(&rule_content->expose_header, "yyy");
+    rule_content->max_age_seconds = 7200;
+    cos_list_add_tail(&rule_content->node, &rule_list);
+
+    rule_content = cos_create_cors_rule_content(p);
+    cos_str_set(&rule_content->id, "testrule3");
+    cos_str_set(&rule_content->allowed_origin, "http://www.qq3.com");
+    cos_str_set(&rule_content->allowed_method, "GET");
+    cos_str_set(&rule_content->allowed_header, "*");
+    cos_str_set(&rule_content->expose_header, "zzz");
+    rule_content->max_age_seconds = 60;
+    cos_list_add_tail(&rule_content->node, &rule_list);
+
+    // put cors
+    s = cos_put_bucket_cors(options, &bucket, &rule_list, &resp_headers);
+    log_status(s);
+
+    // get cors
+    cos_list_t rule_list_ret;
+    cos_list_init(&rule_list_ret);
+    s = cos_get_bucket_cors(options, &bucket, &rule_list_ret, &resp_headers);
+    log_status(s);
+    cos_cors_rule_content_t *content = NULL;
+    cos_list_for_each_entry(cos_cors_rule_content_t, content, &rule_list_ret, node) {
+        printf(
+            "cors id:%s, allowed_origin:%s, allowed_method:%s, allowed_header:%s, expose_header:%s, max_age_seconds:%d\n",
+            content->id.data, content->allowed_origin.data, content->allowed_method.data, content->allowed_header.data,
+            content->expose_header.data, content->max_age_seconds);
+    }
+
+    // delete cors
+    s = cos_delete_bucket_cors(options, &bucket, &resp_headers);
+    log_status(s);
+
+    cos_pool_destroy(p);
+}
+
+void test_versioning() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers = NULL;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    cos_versioning_content_t *versioning = NULL;
+    versioning = cos_create_versioning_content(p);
+    cos_str_set(&versioning->status, "Suspended");
+
+    // put bucket versioning
+    s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers);
+    log_status(s);
+
+    // get bucket versioning
+    cos_str_set(&versioning->status, "");
+    s = cos_get_bucket_versioning(options, &bucket, versioning, &resp_headers);
+    log_status(s);
+    printf("bucket versioning status: %s\n", versioning->status.data);
+
+    cos_pool_destroy(p);
+}
+
+void test_replication() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_request_options_t *dst_options = NULL;
+    cos_string_t bucket;
+    cos_string_t dst_bucket;
+    cos_table_t *resp_headers = NULL;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    cos_str_set(&dst_bucket, "replicationtest");
+
+    dst_options = cos_request_options_create(p);
+    init_test_request_options(dst_options, is_cname);
+    cos_str_set(&dst_options->config->endpoint, "cn-east.myqcloud.com");
"cn-east.myqcloud.com"); + + // enable bucket versioning + cos_versioning_content_t *versioning = NULL; + versioning = cos_create_versioning_content(p); + cos_str_set(&versioning->status, "Enabled"); + s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + s = cos_put_bucket_versioning(dst_options, &dst_bucket, versioning, &resp_headers); + log_status(s); + + cos_replication_params_t *replication_param = NULL; + replication_param = cos_create_replication_params(p); + cos_str_set(&replication_param->role, "qcs::cam::uin/100000616666:uin/100000616666"); + + cos_replication_rule_content_t *rule = NULL; + rule = cos_create_replication_rule_content(p); + cos_str_set(&rule->id, "Rule_01"); + cos_str_set(&rule->status, "Enabled"); + cos_str_set(&rule->prefix, "test1"); + cos_str_set(&rule->dst_bucket, "qcs:id/0:cos:cn-east:appid/1253686666:replicationtest"); + cos_list_add_tail(&rule->node, &replication_param->rule_list); + + rule = cos_create_replication_rule_content(p); + cos_str_set(&rule->id, "Rule_02"); + cos_str_set(&rule->status, "Disabled"); + cos_str_set(&rule->prefix, "test2"); + cos_str_set(&rule->storage_class, "Standard_IA"); + cos_str_set(&rule->dst_bucket, "qcs:id/0:cos:cn-east:appid/1253686666:replicationtest"); + cos_list_add_tail(&rule->node, &replication_param->rule_list); + + rule = cos_create_replication_rule_content(p); + cos_str_set(&rule->id, "Rule_03"); + cos_str_set(&rule->status, "Enabled"); + cos_str_set(&rule->prefix, "test3"); + cos_str_set(&rule->storage_class, "Standard_IA"); + cos_str_set(&rule->dst_bucket, "qcs:id/0:cos:cn-east:appid/1253686666:replicationtest"); + cos_list_add_tail(&rule->node, &replication_param->rule_list); + + // put bucket replication + s = cos_put_bucket_replication(options, &bucket, replication_param, &resp_headers); + log_status(s); + + // get bucket replication + cos_replication_params_t *replication_param2 = NULL; + replication_param2 = cos_create_replication_params(p); + s = cos_get_bucket_replication(options, &bucket, replication_param2, &resp_headers); + log_status(s); + printf("ReplicationConfiguration role: %s\n", replication_param2->role.data); + cos_replication_rule_content_t *content = NULL; + cos_list_for_each_entry(cos_replication_rule_content_t, content, &replication_param2->rule_list, node) { + printf("ReplicationConfiguration rule, id:%s, status:%s, prefix:%s, dst_bucket:%s, storage_class:%s\n", + content->id.data, content->status.data, content->prefix.data, content->dst_bucket.data, + content->storage_class.data); + } + + // delete bucket replication + s = cos_delete_bucket_replication(options, &bucket, &resp_headers); + log_status(s); + + // disable bucket versioning + cos_str_set(&versioning->status, "Suspended"); + s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + s = cos_put_bucket_versioning(dst_options, &dst_bucket, versioning, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_presigned_url() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t presigned_url; + cos_table_t *params = NULL; + cos_table_t *headers = NULL; + int sign_host = 1; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + cos_gen_presigned_url(options, &bucket, &object, 300, HTTP_GET, 
+    printf("presigned_url: %s\n", presigned_url.data);
+
+    // add your own params and headers here
+    params = cos_table_make(options->pool, 0);
+    // cos_table_add(params, "param1", "value");
+    headers = cos_table_make(options->pool, 0);
+    // cos_table_add(headers, "header1", "value");
+
+    // sign_host = 1 is strongly recommended: it forces the Host header into the
+    // signed header list and prevents unauthorized access
+    cos_gen_presigned_url_safe(options, &bucket, &object, 300, HTTP_GET, headers, params, sign_host, &presigned_url);
+    printf("presigned_url_safe: %s\n", presigned_url.data);
+
+    cos_pool_destroy(p);
+}
+
+void test_head_bucket() {
+    cos_pool_t *pool = NULL;
+    int is_cname = 0;
+    cos_status_t *status = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers = NULL;
+
+    // create a memory pool
+    cos_pool_create(&pool, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(pool);
+    options->config = cos_config_create(options->pool);
+
+    init_test_request_options(options, is_cname);
+
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    options->ctl = cos_http_controller_create(options->pool, 0);
+
+    status = cos_head_bucket(options, &bucket, &resp_headers);
+    log_status(status);
+
+    cos_pool_destroy(pool);
+}
+
+void test_check_bucket_exist() {
+    cos_pool_t *pool = NULL;
+    int is_cname = 0;
+    cos_status_t *status = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers = NULL;
+    cos_bucket_exist_status_e bucket_exist;
+
+    // create a memory pool
+    cos_pool_create(&pool, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(pool);
+    init_test_request_options(options, is_cname);
+
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // check whether the bucket exists
+    status = cos_check_bucket_exist(options, &bucket, &bucket_exist, &resp_headers);
+    if (bucket_exist == COS_BUCKET_NON_EXIST) {
+        printf("bucket: %.*s does not exist.\n", bucket.len, bucket.data);
+    } else if (bucket_exist == COS_BUCKET_EXIST) {
+        printf("bucket: %.*s exists.\n", bucket.len, bucket.data);
+    } else {
+        printf("bucket: %.*s unknown status.\n", bucket.len, bucket.data);
+        log_status(status);
+    }
+
+    cos_pool_destroy(pool);
+}
+
+void test_get_service() {
+    cos_pool_t *pool = NULL;
+    int is_cname = 0;
+    cos_status_t *status = NULL;
+    cos_request_options_t *options = NULL;
+    cos_get_service_params_t *list_params = NULL;
+    cos_table_t *resp_headers = NULL;
+
+    // create a memory pool
+    cos_pool_create(&pool, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(pool);
+    options->config = cos_config_create(options->pool);
+
+    init_test_request_options(options, is_cname);
+    options->ctl = cos_http_controller_create(options->pool, 0);
+
+    // create get service params; all buckets are returned by default
+    list_params = cos_create_get_service_params(options->pool);
+    // if all_region is set to 0, only the region of options->config->endpoint is queried
+    // list_params->all_region = 0;
+
+    status = cos_get_service(options, list_params, &resp_headers);
+    log_status(status);
+    if (!cos_status_is_ok(status)) {
+        cos_pool_destroy(pool);
+        return;
+    }
+
+    // inspect the results
+    cos_get_service_content_t *content = NULL;
+    char *line = NULL;
+    cos_list_for_each_entry(cos_get_service_content_t, content, &list_params->bucket_list, node) {
+        line = apr_psprintf(options->pool, "%.*s\t%.*s\t%.*s\n", content->bucket_name.len, content->bucket_name.data,
+                            content->location.len, content->location.data, content->creation_date.len,
+                            content->creation_date.data);
+        printf("%s", line);
+    }
+
+    cos_pool_destroy(pool);
+}
+
+void test_website() {
+    cos_pool_t *pool = NULL;
+    int is_cname = 0;
+    cos_status_t *status = NULL;
+    cos_request_options_t *options = NULL;
+    cos_website_params_t *website_params = NULL;
+    cos_website_params_t *website_result = NULL;
+    cos_website_rule_content_t *website_content = NULL;
+    cos_table_t *resp_headers = NULL;
+    cos_string_t bucket;
+
+    // create a memory pool
+    cos_pool_create(&pool, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(pool);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // create website params
+    website_params = cos_create_website_params(options->pool);
+    cos_str_set(&website_params->index, "index.html");
+    cos_str_set(&website_params->redirect_protocol, "https");
+    cos_str_set(&website_params->error_document, "Error.html");
+
+    website_content = cos_create_website_rule_content(options->pool);
+    cos_str_set(&website_content->condition_errcode, "404");
+    cos_str_set(&website_content->redirect_protocol, "https");
+    cos_str_set(&website_content->redirect_replace_key, "404.html");
+    cos_list_add_tail(&website_content->node, &website_params->rule_list);
+
+    website_content = cos_create_website_rule_content(options->pool);
+    cos_str_set(&website_content->condition_prefix, "docs/");
+    cos_str_set(&website_content->redirect_protocol, "https");
+    cos_str_set(&website_content->redirect_replace_key_prefix, "documents/");
+    cos_list_add_tail(&website_content->node, &website_params->rule_list);
+
+    website_content = cos_create_website_rule_content(options->pool);
+    cos_str_set(&website_content->condition_prefix, "img/");
+    cos_str_set(&website_content->redirect_protocol, "https");
+    cos_str_set(&website_content->redirect_replace_key, "demo.jpg");
+    cos_list_add_tail(&website_content->node, &website_params->rule_list);
+
+    status = cos_put_bucket_website(options, &bucket, website_params, &resp_headers);
+    log_status(status);
+
+    website_result = cos_create_website_params(options->pool);
+    status = cos_get_bucket_website(options, &bucket, website_result, &resp_headers);
+    log_status(status);
+    if (!cos_status_is_ok(status)) {
+        cos_pool_destroy(pool);
+        return;
+    }
+
+    // inspect the results
+    cos_website_rule_content_t *content = NULL;
+    char *line = NULL;
+    line = apr_psprintf(options->pool, "%.*s\n", website_result->index.len, website_result->index.data);
+    printf("index: %s", line);
+    line = apr_psprintf(options->pool, "%.*s\n", website_result->redirect_protocol.len,
+                        website_result->redirect_protocol.data);
+    printf("redirect protocol: %s", line);
+    line = apr_psprintf(options->pool, "%.*s\n", website_result->error_document.len, website_result->error_document.data);
+    printf("error document: %s", line);
+    cos_list_for_each_entry(cos_website_rule_content_t, content, &website_result->rule_list, node) {
+        line = apr_psprintf(options->pool, "%.*s\t%.*s\t%.*s\t%.*s\t%.*s\n", content->condition_errcode.len,
+                            content->condition_errcode.data, content->condition_prefix.len, content->condition_prefix.data,
+                            content->redirect_protocol.len, content->redirect_protocol.data,
+                            content->redirect_replace_key.len, content->redirect_replace_key.data,
+                            content->redirect_replace_key_prefix.len, content->redirect_replace_key_prefix.data);
+        printf("%s", line);
+    }
+
+    status = cos_delete_bucket_website(options, &bucket, &resp_headers);
+    log_status(status);
+
+    cos_pool_destroy(pool);
+}
+
+void test_domain() {
+    cos_pool_t *pool = NULL;
+    int is_cname = 0;
+    cos_status_t *status = NULL;
+    cos_request_options_t *options = NULL;
+    cos_domain_params_t *domain_params = NULL;
+    cos_domain_params_t *domain_result = NULL;
+    cos_table_t *resp_headers = NULL;
+    cos_string_t bucket;
+
+    // create a memory pool
+    cos_pool_create(&pool, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(pool);
+    options->config = cos_config_create(options->pool);
+
+    init_test_request_options(options, is_cname);
+    options->ctl = cos_http_controller_create(options->pool, 0);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // create domain params
+    domain_params = cos_create_domain_params(options->pool);
+    cos_str_set(&domain_params->status, "ENABLED");
+    cos_str_set(&domain_params->name, "www.abc.com");
+    cos_str_set(&domain_params->type, "REST");
+    cos_str_set(&domain_params->forced_replacement, "CNAME");
+
+    status = cos_put_bucket_domain(options, &bucket, domain_params, &resp_headers);
+    log_status(status);
+
+    domain_result = cos_create_domain_params(options->pool);
+    status = cos_get_bucket_domain(options, &bucket, domain_result, &resp_headers);
+    log_status(status);
+    if (!cos_status_is_ok(status)) {
+        cos_pool_destroy(pool);
+        return;
+    }
+
+    // inspect the results
+    char *line = NULL;
+    line = apr_psprintf(options->pool, "%.*s\n", domain_result->status.len, domain_result->status.data);
+    printf("status: %s", line);
+    line = apr_psprintf(options->pool, "%.*s\n", domain_result->name.len, domain_result->name.data);
+    printf("name: %s", line);
+    line = apr_psprintf(options->pool, "%.*s\n", domain_result->type.len, domain_result->type.data);
+    printf("type: %s", line);
+    line = apr_psprintf(options->pool, "%.*s\n", domain_result->forced_replacement.len,
+                        domain_result->forced_replacement.data);
+    printf("forced_replacement: %s", line);
+
+    cos_pool_destroy(pool);
+}
+
+void test_logging() {
+    cos_pool_t *pool = NULL;
+    int is_cname = 0;
+    cos_status_t *status = NULL;
+    cos_request_options_t *options = NULL;
+    cos_logging_params_t *params = NULL;
+    cos_logging_params_t *result = NULL;
+    cos_table_t *resp_headers = NULL;
+    cos_string_t bucket;
+
+    // create a memory pool
+    cos_pool_create(&pool, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(pool);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // create logging params
+    params = cos_create_logging_params(options->pool);
+    cos_str_set(&params->target_bucket, TEST_BUCKET_NAME);
+    cos_str_set(&params->target_prefix, "logging/");
+
+    status = cos_put_bucket_logging(options, &bucket, params, &resp_headers);
+    log_status(status);
+
+    result = cos_create_logging_params(options->pool);
+    status = cos_get_bucket_logging(options, &bucket, result, &resp_headers);
+    log_status(status);
+    if (!cos_status_is_ok(status)) {
+        cos_pool_destroy(pool);
+        return;
+    }
+
+    // inspect the results
+    char *line = NULL;
+    line = apr_psprintf(options->pool, "%.*s\n", result->target_bucket.len, result->target_bucket.data);
+    printf("target bucket: %s", line);
+    line = apr_psprintf(options->pool, "%.*s\n", result->target_prefix.len, result->target_prefix.data);
+    printf("target prefix: %s", line);
+
+    cos_pool_destroy(pool);
+}
+
+void test_inventory() {
+    cos_pool_t *pool = NULL;
+    int is_cname = 0;
+    int inum = 3, i, len;
+    char buf[inum][32];
+    char dest_bucket[128];
+    cos_status_t *status = NULL;
+    cos_request_options_t *options = NULL;
+    cos_table_t *resp_headers = NULL;
+    cos_string_t bucket;
+    cos_inventory_params_t *get_params = NULL;
+    cos_inventory_optional_t *optional = NULL;
+    cos_list_inventory_params_t *list_params = NULL;
+
+    // create a memory pool
+    cos_pool_create(&pool, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(pool);
+    options->config = cos_config_create(options->pool);
+
+    init_test_request_options(options, is_cname);
+    options->ctl = cos_http_controller_create(options->pool, 0);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // put bucket inventory
+    len = snprintf(dest_bucket, 128, "qcs::cos:%s::%s", TEST_REGION, TEST_BUCKET_NAME);
+    dest_bucket[len] = 0;
+    for (i = 0; i < inum; i++) {
+        cos_inventory_params_t *params = cos_create_inventory_params(pool);
+        cos_inventory_optional_t *optional;
+        len = snprintf(buf[i], 32, "id%d", i);
+        buf[i][len] = 0;
+        cos_str_set(&params->id, buf[i]);
+        cos_str_set(&params->is_enabled, "true");
+        cos_str_set(&params->frequency, "Daily");
+        cos_str_set(&params->filter_prefix, "myPrefix");
+        cos_str_set(&params->included_object_versions, "All");
+        cos_str_set(&params->destination.format, "CSV");
+        cos_str_set(&params->destination.account_id, TEST_UIN);
+        cos_str_set(&params->destination.bucket, dest_bucket);
+        cos_str_set(&params->destination.prefix, "invent");
+        params->destination.encryption = 1;
+        optional = cos_create_inventory_optional(pool);
+        cos_str_set(&optional->field, "Size");
+        cos_list_add_tail(&optional->node, &params->fields);
+        optional = cos_create_inventory_optional(pool);
+        cos_str_set(&optional->field, "LastModifiedDate");
+        cos_list_add_tail(&optional->node, &params->fields);
+        optional = cos_create_inventory_optional(pool);
+        cos_str_set(&optional->field, "ETag");
+        cos_list_add_tail(&optional->node, &params->fields);
+        optional = cos_create_inventory_optional(pool);
+        cos_str_set(&optional->field, "StorageClass");
+        cos_list_add_tail(&optional->node, &params->fields);
+        optional = cos_create_inventory_optional(pool);
+        cos_str_set(&optional->field, "ReplicationStatus");
+        cos_list_add_tail(&optional->node, &params->fields);
+
+        status = cos_put_bucket_inventory(options, &bucket, params, &resp_headers);
+        log_status(status);
+    }
+
+    // get inventory
+    get_params = cos_create_inventory_params(pool);
+    cos_str_set(&get_params->id, buf[inum / 2]);
+    status = cos_get_bucket_inventory(options, &bucket, get_params, &resp_headers);
+    log_status(status);
+
+    printf("id: %s\nis_enabled: %s\nfrequency: %s\nfilter_prefix: %s\nincluded_object_versions: %s\n",
+           get_params->id.data, get_params->is_enabled.data, get_params->frequency.data, get_params->filter_prefix.data,
+           get_params->included_object_versions.data);
+    printf("destination:\n");
+    printf("\tencryption: %d\n", get_params->destination.encryption);
+    printf("\tformat: %s\n", get_params->destination.format.data);
+    printf("\taccount_id: %s\n", get_params->destination.account_id.data);
+    printf("\tbucket: %s\n", get_params->destination.bucket.data);
+    printf("\tprefix: %s\n", get_params->destination.prefix.data);
+    cos_list_for_each_entry(cos_inventory_optional_t, optional, &get_params->fields, node) {
+        printf("field: %s\n", optional->field.data);
+    }
+
+    // list inventory
+    list_params = cos_create_list_inventory_params(pool);
+    status = cos_list_bucket_inventory(options, &bucket, list_params, &resp_headers);
+    log_status(status);
+
+    get_params = NULL;
+    cos_list_for_each_entry(cos_inventory_params_t, get_params, &list_params->inventorys, node) {
+        printf("id: %s\nis_enabled: %s\nfrequency: %s\nfilter_prefix: %s\nincluded_object_versions: %s\n",
+               get_params->id.data, get_params->is_enabled.data, get_params->frequency.data, get_params->filter_prefix.data,
+               get_params->included_object_versions.data);
+        printf("destination:\n");
+        printf("\tencryption: %d\n", get_params->destination.encryption);
+        printf("\tformat: %s\n", get_params->destination.format.data);
+        printf("\taccount_id: %s\n", get_params->destination.account_id.data);
+        printf("\tbucket: %s\n", get_params->destination.bucket.data);
+        printf("\tprefix: %s\n", get_params->destination.prefix.data);
+        cos_list_for_each_entry(cos_inventory_optional_t, optional, &get_params->fields, node) {
+            printf("field: %s\n", optional->field.data);
+        }
+    }
+
+    // delete inventory
+    for (i = 0; i < inum; i++) {
+        cos_string_t id;
+        cos_str_set(&id, buf[i]);
+        status = cos_delete_bucket_inventory(options, &bucket, &id, &resp_headers);
+        log_status(status);
+    }
+
+    cos_pool_destroy(pool);
+}
+
+void test_bucket_tagging() {
+    cos_pool_t *pool = NULL;
+    int is_cname = 0;
+    cos_status_t *status = NULL;
+    cos_request_options_t *options = NULL;
+    cos_table_t *resp_headers = NULL;
+    cos_string_t bucket;
+    cos_tagging_params_t *params = NULL;
+    cos_tagging_params_t *result = NULL;
+    cos_tagging_tag_t *tag = NULL;
+
+    // create a memory pool
+    cos_pool_create(&pool, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(pool);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // put tagging
+    params = cos_create_tagging_params(pool);
+    tag = cos_create_tagging_tag(pool);
+    cos_str_set(&tag->key, "age");
+    cos_str_set(&tag->value, "18");
+    cos_list_add_tail(&tag->node, &params->node);
+
+    tag = cos_create_tagging_tag(pool);
+    cos_str_set(&tag->key, "name");
+    cos_str_set(&tag->value, "xiaoming");
+    cos_list_add_tail(&tag->node, &params->node);
+
+    status = cos_put_bucket_tagging(options, &bucket, params, &resp_headers);
+    log_status(status);
+
+    // get tagging
+    result = cos_create_tagging_params(pool);
+    status = cos_get_bucket_tagging(options, &bucket, result, &resp_headers);
+    log_status(status);
+
+    tag = NULL;
+    cos_list_for_each_entry(cos_tagging_tag_t, tag, &result->node, node) {
+        printf("tagging key: %s\n", tag->key.data);
+        printf("tagging value: %s\n", tag->value.data);
+    }
+
+    // delete tagging
+    status = cos_delete_bucket_tagging(options, &bucket, &resp_headers);
+    log_status(status);
+
+    cos_pool_destroy(pool);
+}
+
+void test_object_tagging() {
+    cos_pool_t *pool = NULL;
+    int is_cname = 0;
+    cos_status_t *status = NULL;
+    cos_request_options_t *options = NULL;
+    cos_table_t *resp_headers = NULL;
+    cos_string_t bucket;
+    cos_string_t object;
+    cos_string_t version_id = cos_string("");
+    cos_tagging_params_t *params = NULL;
+    cos_tagging_params_t *result = NULL;
+    cos_tagging_tag_t *tag = NULL;
+
+    // create a memory pool
+    cos_pool_create(&pool, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(pool);
+    options->config = cos_config_create(options->pool);
+
+    init_test_request_options(options, is_cname);
+    options->ctl = cos_http_controller_create(options->pool, 0);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    cos_str_set(&object, TEST_OBJECT_NAME1);
+
+    // put object tagging
+    params = cos_create_tagging_params(pool);
+    tag = cos_create_tagging_tag(pool);
+    cos_str_set(&tag->key, "age");
+    cos_str_set(&tag->value, "18");
+    cos_list_add_tail(&tag->node, &params->node);
+
+    tag = cos_create_tagging_tag(pool);
+    cos_str_set(&tag->key, "name");
+    cos_str_set(&tag->value, "xiaoming");
+    cos_list_add_tail(&tag->node, &params->node);
+
+    status = cos_put_object_tagging(options, &bucket, &object, &version_id, NULL, params, &resp_headers);
+    log_status(status);
+
+    // get object tagging
+    result = cos_create_tagging_params(pool);
+    status = cos_get_object_tagging(options, &bucket, &object, &version_id, NULL, result, &resp_headers);
+    log_status(status);
+
+    tag = NULL;
+    cos_list_for_each_entry(cos_tagging_tag_t, tag, &result->node, node) {
+        printf("tagging key: %s\n", tag->key.data);
+        printf("tagging value: %s\n", tag->value.data);
+    }
+
+    // delete tagging
+    status = cos_delete_object_tagging(options, &bucket, &object, &version_id, NULL, &resp_headers);
+    log_status(status);
+
+    cos_pool_destroy(pool);
+}
+
+static void log_get_referer(cos_referer_params_t *result) {
+    int index = 0;
+    cos_referer_domain_t *domain;
+
+    cos_warn_log("status: %s", result->status.data);
+    cos_warn_log("referer_type: %s", result->referer_type.data);
+    cos_warn_log("empty_refer_config: %s", result->empty_refer_config.data);
+
+    cos_list_for_each_entry(cos_referer_domain_t, domain, &result->domain_list, node) {
+        cos_warn_log("domain index:%d", ++index);
+        cos_warn_log("domain: %s", domain->domain.data);
+    }
+}
+
+void test_referer() {
+    cos_pool_t *pool = NULL;
+    int is_cname = 0;
+    cos_status_t *status = NULL;
+    cos_request_options_t *options = NULL;
+    cos_table_t *resp_headers = NULL;
+    cos_string_t bucket;
+    cos_referer_params_t *params = NULL;
+    cos_referer_domain_t *domain = NULL;
+    cos_referer_params_t *result = NULL;
+
+    // create a memory pool
+    cos_pool_create(&pool, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(pool);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // replace with your own configuration; see https://cloud.tencent.com/document/product/436/32492
+    params = cos_create_referer_params(pool);
+    cos_str_set(&params->status, "Enabled");
+    cos_str_set(&params->referer_type, "White-List");
+    cos_str_set(&params->empty_refer_config, "Allow");
+    domain = cos_create_referer_domain(pool);
+    cos_str_set(&domain->domain, "www.qq.com");
+    cos_list_add_tail(&domain->node, &params->domain_list);
+    domain = cos_create_referer_domain(pool);
+    cos_str_set(&domain->domain, "*.tencent.com");
+    cos_list_add_tail(&domain->node, &params->domain_list);
+
+    // put referer
+    status = cos_put_bucket_referer(options, &bucket, params, &resp_headers);
+    log_status(status);
+
+    // get referer
+    result = cos_create_referer_params(pool);
+    status = cos_get_bucket_referer(options, &bucket, result, &resp_headers);
+    log_status(status);
+    if (status->code == 200) {
+        log_get_referer(result);
+    }
+
+    cos_pool_destroy(pool);
+}
+
+void test_intelligenttiering() {
+    cos_pool_t *pool = NULL;
+    int is_cname = 0;
+    cos_status_t *status = NULL;
+    cos_request_options_t *options = NULL;
+    cos_table_t *resp_headers = NULL;
+    cos_string_t bucket;
+    cos_intelligenttiering_params_t *params = NULL;
+    cos_intelligenttiering_params_t *result = NULL;
+
+    // create a memory pool
+    cos_pool_create(&pool, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(pool);
+    options->config = cos_config_create(options->pool);
+
+    init_test_request_options(options, is_cname);
+    options->ctl = cos_http_controller_create(options->pool, 0);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // put intelligenttiering
+    params = cos_create_intelligenttiering_params(pool);
+    cos_str_set(&params->status, "Enabled");
+    params->days = 30;
+
+    status = cos_put_bucket_intelligenttiering(options, &bucket, params, &resp_headers);
+    log_status(status);
+
+    // get intelligenttiering
+    result = cos_create_intelligenttiering_params(pool);
+    status = cos_get_bucket_intelligenttiering(options, &bucket, result, &resp_headers);
+    log_status(status);
+
+    printf("status: %s\n", result->status.data);
+    printf("days: %d\n", result->days);
+    cos_pool_destroy(pool);
+}
+
+void test_delete_directory() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers;
+    int is_truncated = 1;
+    cos_string_t marker;
+    cos_list_t deleted_object_list;
+    int is_quiet = COS_TRUE;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    cos_list_init(&deleted_object_list);
+
+    // list object (get bucket)
+    cos_list_object_params_t *list_params = NULL;
+    list_params = cos_create_list_object_params(p);
+    cos_str_set(&list_params->prefix, "folder/");
+    cos_str_set(&marker, "");
+    while (is_truncated) {
+        list_params->marker = marker;
+        s = cos_list_object(options, &bucket, list_params, &resp_headers);
+        if (!cos_status_is_ok(s)) {
+            printf("list object failed, req_id:%s\n", s->req_id);
+            break;
+        }
+
+        s = cos_delete_objects(options, &bucket, &list_params->object_list, is_quiet, &resp_headers, &deleted_object_list);
+        log_status(s);
+        if (!cos_status_is_ok(s)) {
+            printf("delete objects failed, req_id:%s\n", s->req_id);
+        }
+
+        is_truncated = list_params->truncated;
+        marker = list_params->next_marker;
+    }
+    cos_pool_destroy(p);
+}
+
+void test_list_directory() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers;
+    int is_truncated = 1;
+    cos_string_t marker;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // list object (get bucket)
+    cos_list_object_params_t *list_params = NULL;
+    list_params = cos_create_list_object_params(p);
+    // prefix: only objects whose keys start with this prefix are listed
+    cos_str_set(&list_params->prefix, "folder/");
+    // delimiter: set to "/" to list only the objects under the current directory;
+    // leave it empty to list all objects
+    cos_str_set(&list_params->delimiter, "/");
+    // maximum number of entries per call; a single list object request supports at most 1000
+    list_params->max_ret = 1000;
+    cos_str_set(&marker, "");
+    while (is_truncated) {
+        list_params->marker = marker;
+        s = cos_list_object(options, &bucket, list_params, &resp_headers);
+        if (!cos_status_is_ok(s)) {
+            printf("list object failed, req_id:%s\n", s->req_id);
+            break;
+        }
+        // list_params->object_list holds the returned objects
+        cos_list_object_content_t *content = NULL;
+        cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) {
+            printf("object: %s\n", content->key.data);
+        }
+        // list_params->common_prefix_list holds the paths truncated at the delimiter;
+        // with "/" as the delimiter, the common prefixes are the paths of all subdirectories
+        cos_list_object_common_prefix_t *common_prefix = NULL;
+        cos_list_for_each_entry(cos_list_object_common_prefix_t, common_prefix, &list_params->common_prefix_list, node) {
+            printf("common prefix: %s\n", common_prefix->prefix.data);
+        }
+
+        is_truncated = list_params->truncated;
+        marker = list_params->next_marker;
+    }
+    cos_pool_destroy(p);
+}
+
+void test_list_all_objects() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers;
+    int is_truncated = 1;
+    cos_string_t marker;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // list object (get bucket)
+    cos_list_object_params_t *list_params = NULL;
+    list_params = cos_create_list_object_params(p);
+    // maximum number of entries per call; a single list object request supports at most 1000
+    list_params->max_ret = 1000;
+    cos_str_set(&marker, "");
+    while (is_truncated) {
+        list_params->marker = marker;
+        cos_list_init(&list_params->object_list);
+        s = cos_list_object(options, &bucket, list_params, &resp_headers);
+        if (!cos_status_is_ok(s)) {
+            printf("list object failed, req_id:%s\n", s->req_id);
+            break;
+        }
+        // list_params->object_list holds the returned objects
+        cos_list_object_content_t *content = NULL;
+        cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) {
+            printf("object: %s\n", content->key.data);
+        }
+
+        is_truncated = list_params->truncated;
+        marker = list_params->next_marker;
+    }
+    cos_pool_destroy(p);
+}
+
+void test_download_directory() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t file_name;
+    cos_string_t suffix = cos_string("/");
+    cos_table_t *resp_headers;
+    cos_table_t *headers = NULL;
+    cos_table_t *params = NULL;
+    int is_truncated = 1;
+    cos_string_t marker;
+    apr_status_t status;
+
+    // initialize request options
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // list object (get bucket)
+    cos_list_object_params_t *list_params = NULL;
+    list_params = cos_create_list_object_params(p);
+    cos_str_set(&list_params->prefix, "folder/");  // replace with your own directory name
+    cos_str_set(&marker, "");
+    while (is_truncated) {
+        list_params->marker = marker;
+        s = cos_list_object(options, &bucket, list_params, &resp_headers);
+        log_status(s);
+        if (!cos_status_is_ok(s)) {
+            printf("list object failed, req_id:%s\n", s->req_id);
+            break;
+        }
+        cos_list_object_content_t *content = NULL;
+        cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) {
+            cos_str_set(&file_name, content->key.data);
+            if (cos_ends_with(&content->key, &suffix)) {
+                // directories must be created first; adjust the 0x0755 permission
+                // as needed, see the definitions in apr_file_info.h
+                status = apr_dir_make(content->key.data, 0x0755, options->pool);
+                if (status != APR_SUCCESS && !APR_STATUS_IS_EEXIST(status)) {
+                    printf("mkdir: %s failed, status: %d\n", content->key.data, status);
+                }
+            } else {
+                // download the object to a local directory, by default the current
+                // working directory of the program
+                s = cos_get_object_to_file(options, &bucket, &content->key, headers, params, &file_name, &resp_headers);
+                if (!cos_status_is_ok(s)) {
+                    printf("get object[%s] failed, req_id:%s\n", content->key.data, s->req_id);
+                }
+            }
+        }
+        is_truncated = list_params->truncated;
+        marker = list_params->next_marker;
+    }
+
+    // destroy the memory pool
+    cos_pool_destroy(p);
+}
+
+void test_move() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t object;
+    cos_string_t src_object;
+    cos_string_t src_endpoint;
+    cos_table_t *resp_headers = NULL;
+
+    // create a memory pool
+    cos_pool_create(&p, NULL);
+
+    // initialize request options
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    // set up the object copy
+    cos_str_set(&object, TEST_OBJECT_NAME1);
+    cos_str_set(&src_endpoint, TEST_COS_ENDPOINT);
+    cos_str_set(&src_object, TEST_OBJECT_NAME2);
+
+    cos_copy_object_params_t *params = NULL;
+    params = cos_create_copy_object_params(p);
+    s = cos_copy_object(options, &bucket, &src_object, &src_endpoint, &bucket, &object, NULL, params, &resp_headers);
+    log_status(s);
+    if (cos_status_is_ok(s)) {
+        s = cos_delete_object(options, &bucket, &src_object, &resp_headers);
+        log_status(s);
+        printf("move object succeeded\n");
+    } else {
+        printf("move object failed\n");
+    }
+
+    // destroy the memory pool
+    cos_pool_destroy(p);
+}
+
+// basic image processing
+void test_ci_base_image_process() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t object;
+    cos_string_t file;
+    cos_table_t *resp_headers;
+    cos_table_t *params = NULL;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    options->config = cos_config_create(options->pool);
+    cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT);
+    cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID);
+    cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET);
+    cos_str_set(&options->config->appid, TEST_APPID);
+    options->config->is_cname = is_cname;
+    options->ctl = cos_http_controller_create(options->pool, 0);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    params = cos_table_make(p, 1);
+    apr_table_addn(params, "imageMogr2/thumbnail/!50p", "");
+    cos_str_set(&file, "test.jpg");
+    cos_str_set(&object, "test.jpg");
+    s = cos_get_object_to_file(options, &bucket, &object, NULL, params, &file, &resp_headers);
+    log_status(s);
+    if (!cos_status_is_ok(s)) {
+        printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id);
+    }
+    cos_pool_destroy(p);
+}
+
+// persistent image processing
+void test_ci_image_process() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t object;
+    cos_string_t file;
+    cos_table_t *resp_headers;
+    cos_table_t *headers = NULL;
+    ci_operation_result_t *results = NULL;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    cos_str_set(&object, "test.jpg");
+
+    // process data already stored in the cloud
+    headers = cos_table_make(p, 1);
+    apr_table_addn(headers, "pic-operations",
+                   "{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"imageView2/format/png\"}]}");
+    s = ci_image_process(options, &bucket, &object, headers, &resp_headers, &results);
+    log_status(s);
+    printf("origin key: %s\n", results->origin.key.data);
+    printf("process key: %s\n", results->object.key.data);
+
+    // process while uploading
+    headers = cos_table_make(p, 1);
+    apr_table_addn(headers, "pic-operations",
+                   "{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"imageView2/format/png\"}]}");
+    cos_str_set(&file, "test.jpg");
+    cos_str_set(&object, "test.jpg");
+    s = ci_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers, &results);
+    log_status(s);
+    printf("origin key: %s\n", results->origin.key.data);
+    printf("process key: %s\n", results->object.key.data);
+
+    cos_pool_destroy(p);
+}
+
+// QR code recognition
+void test_ci_image_qrcode() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t object;
+    cos_string_t file;
+    cos_table_t *resp_headers;
+    cos_table_t *headers = NULL;
+    ci_operation_result_t *results = NULL;
+    ci_qrcode_info_t *content = NULL;
+    ci_qrcode_result_t *result2 = NULL;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    init_test_request_options(options, is_cname);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    cos_str_set(&object, "test.jpg");
+
+    headers = cos_table_make(p, 1);
+    apr_table_addn(headers, "pic-operations",
+                   "{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"QRcode/cover/1\"}]}");
+    // recognize while uploading
+    cos_str_set(&file, "test.jpg");
+    cos_str_set(&object, "test.jpg");
+    s = ci_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers, &results);
+    log_status(s);
+    if (!cos_status_is_ok(s)) {
+        printf("put object failed\n");
+    }
+    printf("CodeStatus: %d\n", results->object.code_status);
+    cos_list_for_each_entry(ci_qrcode_info_t, content, &results->object.qrcode_info, node) {
+        printf("CodeUrl: %s\n", content->code_url.data);
+        printf("Point: %s\n", content->point[0].data);
+        printf("Point: %s\n", content->point[1].data);
+        printf("Point: %s\n", content->point[2].data);
+        printf("Point: %s\n", content->point[3].data);
+    }
+
+    // recognize while downloading
+    s = ci_get_qrcode(options, &bucket, &object, 1, NULL, NULL, &resp_headers, &result2);
+    log_status(s);
+    if (!cos_status_is_ok(s)) {
+        printf("get object failed\n");
+    }
+    printf("CodeStatus: %d\n", result2->code_status);
+    cos_list_for_each_entry(ci_qrcode_info_t, content, &result2->qrcode_info, node) {
+        printf("CodeUrl: %s\n", content->code_url.data);
+        printf("Point: %s\n", content->point[0].data);
+        printf("Point: %s\n", content->point[1].data);
+        printf("Point: %s\n", content->point[2].data);
+        printf("Point: %s\n", content->point[3].data);
+    }
+    printf("ImageResult: %s\n", result2->result_image.data);
+
+    // destroy the memory pool
+    cos_pool_destroy(p);
+}
+
+// image compression
+void test_ci_image_compression() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_string_t object;
+    cos_string_t file;
+    cos_table_t *resp_headers;
+    cos_table_t *params = NULL;
+
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    options->config = cos_config_create(options->pool);
+    cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT);
+    cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID);
+    cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET);
+    cos_str_set(&options->config->appid, TEST_APPID);
+    options->config->is_cname = is_cname;
+    options->ctl = cos_http_controller_create(options->pool, 0);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+    params = cos_table_make(p, 1);
+    apr_table_addn(params, "imageMogr2/format/tpg", "");
+    cos_str_set(&object, "test.jpg");
+    cos_str_set(&file, "test.tpg");
+    s = cos_get_object_to_file(options, &bucket, &object, NULL, params, &file, &resp_headers);
+    log_status(s);
+    if (!cos_status_is_ok(s)) {
+        printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id);
+    }
+
+    params = cos_table_make(p, 1);
+    apr_table_addn(params, "imageMogr2/format/heif", "");
+    cos_str_set(&file, "test.heif");
+    s = cos_get_object_to_file(options, &bucket, &object, NULL, params, &file, &resp_headers);
+    log_status(s);
+    if (!cos_status_is_ok(s)) {
+        printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id);
+    }
+
+    cos_pool_destroy(p);
+}
+
+static void log_video_auditing_result(ci_video_auditing_job_result_t *result) {
+    cos_warn_log("jobid: %s", result->jobs_detail.job_id.data);
+    cos_warn_log("state: %s", result->jobs_detail.state.data);
+    cos_warn_log("creation_time: %s", result->jobs_detail.creation_time.data);
+}
+
+static void log_get_auditing_result(ci_auditing_job_result_t *result) {
+    int index = 0;
+    ci_auditing_snapshot_result_t *snapshot_info;
+    ci_auditing_audio_section_result_t *audio_section_info;
+
+    cos_warn_log("nonexist_job_ids: %s", result->nonexist_job_ids.data);
+    cos_warn_log("code: %s", result->jobs_detail.code.data);
+    cos_warn_log("message: %s", result->jobs_detail.message.data);
+    cos_warn_log("state: %s", result->jobs_detail.state.data);
+    cos_warn_log("creation_time: %s", result->jobs_detail.creation_time.data);
+    cos_warn_log("object: %s", result->jobs_detail.object.data);
+    cos_warn_log("snapshot_count: %s", result->jobs_detail.snapshot_count.data);
+    cos_warn_log("result: %d", result->jobs_detail.result);
+
+    cos_warn_log("porn_info.hit_flag: %d", result->jobs_detail.porn_info.hit_flag);
+    cos_warn_log("porn_info.count: %d", result->jobs_detail.porn_info.count);
+    cos_warn_log("terrorism_info.hit_flag: %d", result->jobs_detail.terrorism_info.hit_flag);
+    cos_warn_log("terrorism_info.count: %d", result->jobs_detail.terrorism_info.count);
+    cos_warn_log("politics_info.hit_flag: %d", result->jobs_detail.politics_info.hit_flag);
+    cos_warn_log("politics_info.count: %d", result->jobs_detail.politics_info.count);
+    cos_warn_log("ads_info.hit_flag: %d", result->jobs_detail.ads_info.hit_flag);
+    cos_warn_log("ads_info.count: %d", result->jobs_detail.ads_info.count);
+
+    cos_list_for_each_entry(ci_auditing_snapshot_result_t, snapshot_info, &result->jobs_detail.snapshot_info_list, node) {
+        cos_warn_log("snapshot index:%d", ++index);
+        cos_warn_log("snapshot_info->url: %s", snapshot_info->url.data);
+        cos_warn_log("snapshot_info->snapshot_time: %d", snapshot_info->snapshot_time);
+        cos_warn_log("snapshot_info->text: %s", snapshot_info->text.data);
+
+        cos_warn_log("snapshot_info->porn_info.hit_flag: %d", snapshot_info->porn_info.hit_flag);
+        cos_warn_log("snapshot_info->porn_info.score: %d", snapshot_info->porn_info.score);
+        cos_warn_log("snapshot_info->porn_info.label: %s", snapshot_info->porn_info.label.data);
+        cos_warn_log("snapshot_info->porn_info.sub_lable: %s", snapshot_info->porn_info.sub_lable.data);
+        cos_warn_log("snapshot_info->terrorism_info.hit_flag: %d", snapshot_info->terrorism_info.hit_flag);
+        cos_warn_log("snapshot_info->terrorism_info.score: %d", snapshot_info->terrorism_info.score);
+        cos_warn_log("snapshot_info->terrorism_info.label: %s", snapshot_info->terrorism_info.label.data);
+        cos_warn_log("snapshot_info->terrorism_info.sub_lable: %s", snapshot_info->terrorism_info.sub_lable.data);
+        cos_warn_log("snapshot_info->politics_info.hit_flag: %d", snapshot_info->politics_info.hit_flag);
+        cos_warn_log("snapshot_info->politics_info.score: %d", snapshot_info->politics_info.score);
+        cos_warn_log("snapshot_info->politics_info.label: %s", snapshot_info->politics_info.label.data);
+        cos_warn_log("snapshot_info->politics_info.sub_lable: %s", snapshot_info->politics_info.sub_lable.data);
+        cos_warn_log("snapshot_info->ads_info.hit_flag: %d", snapshot_info->ads_info.hit_flag);
+        cos_warn_log("snapshot_info->ads_info.score: %d", snapshot_info->ads_info.score);
+        cos_warn_log("snapshot_info->ads_info.label: %s", snapshot_info->ads_info.label.data);
+        cos_warn_log("snapshot_info->ads_info.sub_lable: %s", snapshot_info->ads_info.sub_lable.data);
+    }
+
+    index = 0;
+    cos_list_for_each_entry(ci_auditing_audio_section_result_t, audio_section_info,
+                            &result->jobs_detail.audio_section_info_list, node) {
+        cos_warn_log("audio_section index:%d", ++index);
+        cos_warn_log("audio_section_info->url: %s", audio_section_info->url.data);
+        cos_warn_log("audio_section_info->text: %s", audio_section_info->text.data);
+        cos_warn_log("audio_section_info->offset_time: %d", audio_section_info->offset_time);
+        cos_warn_log("audio_section_info->duration: %d", audio_section_info->duration);
+
+        cos_warn_log("audio_section_info->porn_info.hit_flag: %d", audio_section_info->porn_info.hit_flag);
+        cos_warn_log("audio_section_info->porn_info.score: %d", audio_section_info->porn_info.score);
+        cos_warn_log("audio_section_info->porn_info.key_words: %s", audio_section_info->porn_info.key_words.data);
+        cos_warn_log("audio_section_info->terrorism_info.hit_flag: %d", audio_section_info->terrorism_info.hit_flag);
+        cos_warn_log("audio_section_info->terrorism_info.score: %d", audio_section_info->terrorism_info.score);
+        cos_warn_log("audio_section_info->terrorism_info.key_words: %s", audio_section_info->terrorism_info.key_words.data);
+        cos_warn_log("audio_section_info->politics_info.hit_flag: %d", audio_section_info->politics_info.hit_flag);
+        cos_warn_log("audio_section_info->politics_info.score: %d", audio_section_info->politics_info.score);
+        cos_warn_log("audio_section_info->politics_info.key_words: %s", audio_section_info->politics_info.key_words.data);
+        cos_warn_log("audio_section_info->ads_info.hit_flag: %d", audio_section_info->ads_info.hit_flag);
+        cos_warn_log("audio_section_info->ads_info.score: %d", audio_section_info->ads_info.score);
+        cos_warn_log("audio_section_info->ads_info.key_words: %s", audio_section_info->ads_info.key_words.data);
+    }
+}
+
+static void log_media_buckets_result(ci_media_buckets_result_t *result) {
+    int index = 0;
+    ci_media_bucket_list_t *media_bucket;
+
+    cos_warn_log("total_count: %d", result->total_count);
+    cos_warn_log("page_number: %d", result->page_number);
+    cos_warn_log("page_size: %d", result->page_size);
+
+    cos_list_for_each_entry(ci_media_bucket_list_t, media_bucket, &result->media_bucket_list, node) {
+        cos_warn_log("media_bucket index:%d", ++index);
+        cos_warn_log("media_bucket->bucket_id: %s", media_bucket->bucket_id.data);
+        cos_warn_log("media_bucket->name: %s", media_bucket->name.data);
+        cos_warn_log("media_bucket->region: %s", media_bucket->region.data);
+        cos_warn_log("media_bucket->create_time: %s", media_bucket->create_time.data);
+    }
+}
+
+static void log_media_info_result(ci_media_info_result_t *result) {
+    // format
+    cos_warn_log("format.num_stream: %d", result->format.num_stream);
+    cos_warn_log("format.num_program: %d", result->format.num_program);
+    cos_warn_log("format.format_name: %s", result->format.format_name.data);
+    cos_warn_log("format.format_long_name: %s", result->format.format_long_name.data);
+    cos_warn_log("format.start_time: %f", result->format.start_time);
+    cos_warn_log("format.duration: %f", result->format.duration);
+    cos_warn_log("format.bit_rate: %d", result->format.bit_rate);
+    cos_warn_log("format.size: %d", result->format.size);
+
+    // stream.video
+    cos_warn_log("stream.video.index: %d", result->stream.video.index);
+    cos_warn_log("stream.video.codec_name: %s", result->stream.video.codec_name.data);
+    cos_warn_log("stream.video.codec_long_name: %s", result->stream.video.codec_long_name.data);
+    cos_warn_log("stream.video.codec_time_base: %s", result->stream.video.codec_time_base.data);
+    cos_warn_log("stream.video.codec_tag_string: %s", result->stream.video.codec_tag_string.data);
+    cos_warn_log("stream.video.codec_tag: %s", result->stream.video.codec_tag.data);
+    cos_warn_log("stream.video.profile: %s", result->stream.video.profile.data);
+    cos_warn_log("stream.video.height: %d", result->stream.video.height);
+    cos_warn_log("stream.video.width: %d", result->stream.video.width);
+    cos_warn_log("stream.video.has_b_frame: %d", result->stream.video.has_b_frame);
+    cos_warn_log("stream.video.ref_frames: %d", result->stream.video.ref_frames);
+    cos_warn_log("stream.video.sar: %s", result->stream.video.sar.data);
+    cos_warn_log("stream.video.dar: %s", result->stream.video.dar.data);
+    cos_warn_log("stream.video.pix_format: %s", result->stream.video.pix_format.data);
+    cos_warn_log("stream.video.field_order: %s", result->stream.video.field_order.data);
cos_warn_log("stream.video.level: %d", result->stream.video.level); + cos_warn_log("stream.video.fps: %d", result->stream.video.fps); + cos_warn_log("stream.video.avg_fps: %s", result->stream.video.avg_fps.data); + cos_warn_log("stream.video.timebase: %s", result->stream.video.timebase.data); + cos_warn_log("stream.video.start_time: %f", result->stream.video.start_time); + cos_warn_log("stream.video.duration: %f", result->stream.video.duration); + cos_warn_log("stream.video.bit_rate: %f", result->stream.video.bit_rate); + cos_warn_log("stream.video.num_frames: %d", result->stream.video.num_frames); + cos_warn_log("stream.video.language: %s", result->stream.video.language.data); + + // stream.audio + cos_warn_log("stream.audio.index: %d", result->stream.audio.index); + cos_warn_log("stream.audio.codec_name: %s", result->stream.audio.codec_name.data); + cos_warn_log("stream.audio.codec_long_name: %s", result->stream.audio.codec_long_name.data); + cos_warn_log("stream.audio.codec_time_base: %s", result->stream.audio.codec_time_base.data); + cos_warn_log("stream.audio.codec_tag_string: %s", result->stream.audio.codec_tag_string.data); + cos_warn_log("stream.audio.codec_tag: %s", result->stream.audio.codec_tag.data); + cos_warn_log("stream.audio.sample_fmt: %s", result->stream.audio.sample_fmt.data); + cos_warn_log("stream.audio.sample_rate: %d", result->stream.audio.sample_rate); + cos_warn_log("stream.audio.channel: %d", result->stream.audio.channel); + cos_warn_log("stream.audio.channel_layout: %s", result->stream.audio.channel_layout.data); + cos_warn_log("stream.audio.timebase: %s", result->stream.audio.timebase.data); + cos_warn_log("stream.audio.start_time: %f", result->stream.audio.start_time); + cos_warn_log("stream.audio.duration: %f", result->stream.audio.duration); + cos_warn_log("stream.audio.bit_rate: %f", result->stream.audio.bit_rate); + cos_warn_log("stream.audio.language: %s", result->stream.audio.language.data); + + // stream.subtitle + cos_warn_log("stream.subtitle.index: %d", result->stream.subtitle.index); + cos_warn_log("stream.subtitle.language: %s", result->stream.subtitle.language.data); +} + +void test_ci_video_auditing() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers; + ci_video_auditing_job_options_t *job_options; + ci_video_auditing_job_result_t *job_result; + ci_auditing_job_result_t *auditing_result; + + // 基本配置 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_CI_ENDPOINT); // https://ci..myqcloud.com + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/47316 + job_options = ci_video_auditing_job_options_create(p); + cos_str_set(&job_options->input_object, "test.mp4"); + cos_str_set(&job_options->job_conf.detect_type, "Porn,Terrorism,Politics,Ads"); + cos_str_set(&job_options->job_conf.callback_version, "Detail"); + job_options->job_conf.detect_content = 1; + cos_str_set(&job_options->job_conf.snapshot.mode, "Interval"); + job_options->job_conf.snapshot.time_interval = 1.5; + 
+    job_options->job_conf.snapshot.count = 10;
+
+    // submit a video auditing job
+    s = ci_create_video_auditing_job(options, &bucket, job_options, NULL, &resp_headers, &job_result);
+    log_status(s);
+    if (s->code == 200) {
+        log_video_auditing_result(job_result);
+    }
+
+    // wait for the auditing job to finish; adjust the wait time as needed
+    sleep(300);
+
+    // fetch the auditing result
+    s = ci_get_auditing_job(options, &bucket, &job_result->jobs_detail.job_id, NULL, &resp_headers, &auditing_result);
+    log_status(s);
+    if (s->code == 200) {
+        log_get_auditing_result(auditing_result);
+    }
+
+    // destroy the memory pool
+    cos_pool_destroy(p);
+}
+
+void test_ci_media_process_media_bucket() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_table_t *resp_headers;
+    ci_media_buckets_request_t *media_buckets_request;
+    ci_media_buckets_result_t *media_buckets_result;
+
+    // basic configuration
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    options->config = cos_config_create(options->pool);
+    cos_str_set(&options->config->endpoint, TEST_CI_ENDPOINT);  // https://ci..myqcloud.com
+    cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID);
+    cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET);
+    cos_str_set(&options->config->appid, TEST_APPID);
+    options->config->is_cname = is_cname;
+    options->ctl = cos_http_controller_create(options->pool, 0);
+
+    // replace with your own configuration; see https://cloud.tencent.com/document/product/436/48988
+    media_buckets_request = ci_media_buckets_request_create(p);
+    cos_str_set(&media_buckets_request->regions, "");
+    cos_str_set(&media_buckets_request->bucket_names, "");
+    cos_str_set(&media_buckets_request->bucket_name, "");
+    cos_str_set(&media_buckets_request->page_number, "1");
+    cos_str_set(&media_buckets_request->page_size, "10");
+    s = ci_describe_media_buckets(options, media_buckets_request, NULL, &resp_headers, &media_buckets_result);
+    log_status(s);
+    if (s->code == 200) {
+        log_media_buckets_result(media_buckets_result);
+    }
+
+    // destroy the memory pool
+    cos_pool_destroy(p);
+}
+
+void test_ci_media_process_snapshot() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers;
+    cos_list_t download_buffer;
+    cos_string_t object;
+    ci_get_snapshot_request_t *snapshot_request;
+    cos_buf_t *content = NULL;
+    cos_string_t pic_file = cos_string("snapshot.jpg");
+
+    // basic configuration
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    options->config = cos_config_create(options->pool);
+    cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT);
+    cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID);
+    cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET);
+    cos_str_set(&options->config->appid, TEST_APPID);
+    options->config->is_cname = is_cname;
+    options->ctl = cos_http_controller_create(options->pool, 0);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    cos_str_set(&object, "test.mp4");
+
+    // replace with your own configuration; see https://cloud.tencent.com/document/product/436/55671
+    snapshot_request = ci_snapshot_request_create(p);
+    snapshot_request->time = 7.5;
+    snapshot_request->width = 0;
+    snapshot_request->height = 0;
+    cos_str_set(&snapshot_request->format, "jpg");
+    cos_str_set(&snapshot_request->rotate, "auto");
+    cos_str_set(&snapshot_request->mode, "exactframe");
+    cos_list_init(&download_buffer);
+
+    s = ci_get_snapshot_to_buffer(options, &bucket, &object, snapshot_request, NULL, &download_buffer, &resp_headers);
+    log_status(s);
+
+    int64_t len = 0;
+    int64_t size = 0;
+    int64_t pos = 0;
+    cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) {
+        len += cos_buf_size(content);
+    }
+    char *buf = cos_pcalloc(p, (apr_size_t)(len + 1));
+    buf[len] = '\0';
+    cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) {
+        size = cos_buf_size(content);
+        memcpy(buf + pos, content->pos, (size_t)size);
+        pos += size;
+    }
+    cos_warn_log("Download len:%ld data=%s", len, buf);
+
+    s = ci_get_snapshot_to_file(options, &bucket, &object, snapshot_request, NULL, &pic_file, &resp_headers);
+    log_status(s);
+
+    // destroy the memory pool
+    cos_pool_destroy(p);
+}
+
+void test_ci_media_process_media_info() {
+    cos_pool_t *p = NULL;
+    int is_cname = 0;
+    cos_status_t *s = NULL;
+    cos_request_options_t *options = NULL;
+    cos_string_t bucket;
+    cos_table_t *resp_headers;
+    ci_media_info_result_t *media_info;
+    cos_string_t object;
+
+    // basic configuration
+    cos_pool_create(&p, NULL);
+    options = cos_request_options_create(p);
+    options->config = cos_config_create(options->pool);
+    cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT);
+    cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID);
+    cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET);
+    cos_str_set(&options->config->appid, TEST_APPID);
+    options->config->is_cname = is_cname;
+    options->ctl = cos_http_controller_create(options->pool, 0);
+    cos_str_set(&bucket, TEST_BUCKET_NAME);
+    cos_str_set(&object, "test.mp4");
+
+    // replace with your own configuration; see https://cloud.tencent.com/document/product/436/55672
+    s = ci_get_media_info(options, &bucket, &object, NULL, &resp_headers, &media_info);
+    log_status(s);
+    if (s->code == 200) {
+        log_media_info_result(media_info);
+    }
+
+    // destroy the memory pool
+    cos_pool_destroy(p);
+}
+
+int main(int argc, char *argv[]) {
+    // read SECRETID and SECRETKEY from environment variables
+    // TEST_ACCESS_KEY_ID = getenv("COS_SECRETID");
+    // TEST_ACCESS_KEY_SECRET = getenv("COS_SECRETKEY");
+
+    if (cos_http_io_initialize(NULL, 0) != COSE_OK) {
+        exit(1);
+    }
+
+    // set log level, default COS_LOG_WARN
+    cos_log_set_level(COS_LOG_WARN);
+
+    // set log output, default stderr
+    cos_log_set_output(NULL);
+
+    // test_intelligenttiering();
+    // test_bucket_tagging();
+    // test_object_tagging();
+    // test_referer();
+    // test_logging();
+    // test_inventory();
+    // test_put_object_from_file_with_sse();
+    // test_get_object_to_file_with_sse();
+    // test_head_bucket();
+    // test_check_bucket_exist();
+    // test_get_service();
+    // test_website();
+    // test_domain();
+    // test_delete_objects();
+    // test_delete_objects_by_prefix();
+    // test_bucket();
+    // test_bucket_lifecycle();
+    // test_object_restore();
+    test_put_object_from_file();
+    // test_sign();
+    // test_object();
+    // test_put_object_with_limit();
+    // test_get_object_with_limit();
+    test_head_object();
+    // test_gen_object_url();
+    // test_list_objects();
+    // test_list_directory();
+    // test_list_all_objects();
+    // test_create_dir();
+    // test_append_object();
+    // test_check_object_exist();
+    // multipart_upload_file_from_file();
+    // multipart_upload_file_from_buffer();
+    // abort_multipart_upload();
+    // list_multipart();
+
+    // pthread_t tid[20];
+    // test_resumable_upload_with_multi_threads();
+    // test_resumable();
+    // test_bucket();
+    // test_acl();
+    // test_copy();
+    // test_modify_storage_class();
+    // test_cors();
+    // test_versioning();
+    // test_replication();
+    // test_part_copy();
+    // test_copy_with_part_copy();
+    // test_move();
+    // test_delete_directory();
+    // test_download_directory();
+    // test_presigned_url();
// test_ci_base_image_process(); + // test_ci_image_process(); + // test_ci_image_qrcode(); + // test_ci_image_compression(); + // test_ci_video_auditing(); + // test_ci_media_process_media_bucket(); + // test_ci_media_process_snapshot(); + // test_ci_media_process_media_info(); + + // cos_http_io_deinitialize last + cos_http_io_deinitialize(); + + return 0; +} diff --git a/docs/en/07-develop/06-stream.md b/docs/en/07-develop/06-stream.md index 125173e60b..59a6b815cf 100644 --- a/docs/en/07-develop/06-stream.md +++ b/docs/en/07-develop/06-stream.md @@ -52,7 +52,7 @@ CREATE TABLE d1004 USING meters TAGS ("California.LosAngeles", 3); ### Create a Stream ```sql -create stream current_stream into current_stream_output_stb as select _wstart as start, _wend as end, max(current) as max_current from meters where voltage <= 220 interval (5s); +create stream current_stream trigger at_once into current_stream_output_stb as select _wstart as wstart, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s); ``` ### Write Data diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx index ccf39ef581..5af3897afd 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -23,7 +23,20 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers. -Tips: Data subscription is to consume data from the wal. If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products. +Tips:(c interface for example) +1. A consumption group consumes all data under the same topic, and different consumption groups are independent of each other; +2. A consumption group consumes all vgroups of the same topic, which can be composed of multiple consumers, but a vgroup is only consumed by one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data; +3. On the server side, only one offset is saved for each vgroup, and the offsets for each vgroup are monotonically increasing, but not necessarily continuous. There is no correlation between the offsets of various vgroups; +4. Each poll server will return a result block, which belongs to a vgroup and may contain data from multiple versions of wal. This block can be accessed through tmq_get_vgroup_offset. 
The offset interface obtains the offset of the first record in the block;
+5. If a consumer group has never committed an offset, when its member consumers restart and pull data again, they start consuming from the value of the parameter auto.offset.reset; within one consumer lifecycle, the client locally records the offset of the most recent pull and does not pull duplicate data;
+6. If a consumer terminates abnormally (without calling tmq_close), it takes about 12 seconds to trigger a rebalance of its consumer group; the consumer's status on the server changes to LOST, and after about 1 day the consumer is deleted automatically. If a consumer exits normally, it is deleted right after exiting. If a new consumer is added, a rebalance is triggered after about 2 seconds, and the consumer's status on the server changes to ready;
+7. A consumer group rebalance reassigns vgroups to all consumer members of the group that are in the ready state; a consumer can perform assignment/seek/commit/poll operations only on the vgroups it is responsible for;
+8. A consumer can call tmq_position to obtain the offset of the current consumption, seek to a specified offset, and consume again;
+9. Seek points the position to the specified offset without performing a commit; once the seek succeeds, poll returns data from the specified offset onward;
+10. Before seeking, tmq_get_topic_assignment must be called; the assignment interface obtains the consumer's vgroup IDs and offset ranges. The seek operation checks whether the vgroup ID and offset are legal, and reports an error if they are not (see the sketch after this list);
+11. Because of the WAL expiration and deletion mechanism, even if a seek succeeds, the offset may have expired by the time data is polled. If the polled offset is smaller than the WAL minimum version number, consumption starts from the WAL minimum version number;
+12. The tmq_get_vgroup_offset interface obtains the offset of the first record in the result block that the record belongs to; when seeking to this offset, all the data in this block is consumed. Refer to point 4;
+13. Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. So you need to set reasonable values for the parameters `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consumes the data in a timely way, so that no data is lost. This behavior is similar to Kafka and other widely used message queue products.
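+
+As an illustration of points 8-11, here is a minimal sketch (our own example, not part of the official samples). It assumes a `tmq_t *tmq` that has already subscribed to a topic, and rewinds every vgroup assigned to this consumer to the earliest offset still kept in the WAL; the helper name `rewind_topic` is ours:
+
+```c
+#include <stdio.h>
+#include "taos.h"
+
+// Fetch the assignment first (point 10) so that the subsequent seek
+// uses a legal vgroup ID and offset range.
+static int32_t rewind_topic(tmq_t *tmq, const char *topic) {
+  tmq_topic_assignment *pAssign = NULL;
+  int32_t numOfAssign = 0;
+  int32_t code = tmq_get_topic_assignment(tmq, topic, &pAssign, &numOfAssign);
+  if (code != 0) {
+    fprintf(stderr, "get assignment failed: %s\n", tmq_err2str(code));
+    return code;
+  }
+  for (int32_t i = 0; i < numOfAssign; i++) {
+    // begin is the smallest WAL version still retained for this vgroup,
+    // so seeking there cannot fall below the WAL minimum (point 11).
+    code = tmq_offset_seek(tmq, topic, pAssign[i].vgId, pAssign[i].begin);
+    if (code != 0) {
+      fprintf(stderr, "seek failed: %s\n", tmq_err2str(code));
+      break;
+    }
+  }
+  tmq_free_assignment(pAssign);
+  return code;
+}
+```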
## Data Schema and API @@ -33,40 +46,59 @@ The related schemas and APIs in various languages are described as follows: ```c -typedef struct tmq_t tmq_t; -typedef struct tmq_conf_t tmq_conf_t; -typedef struct tmq_list_t tmq_list_t; + typedef struct tmq_t tmq_t; + typedef struct tmq_conf_t tmq_conf_t; + typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); + typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); -DLL_EXPORT tmq_list_t *tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); -DLL_EXPORT const char *tmq_err2str(int32_t code); + typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, + } tmq_conf_res_t; -DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); -DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); -DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); -DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); -DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); -DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + typedef struct tmq_topic_assignment { + int32_t vgId; + int64_t currentOffset; + int64_t begin; + int64_t end; // The last version of wal + 1 + } tmq_topic_assignment; -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; -typedef enum tmq_conf_res_t tmq_conf_res_t; + DLL_EXPORT tmq_conf_t *tmq_conf_new(); + DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); + DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); + DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + DLL_EXPORT tmq_list_t *tmq_list_new(); + DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); + DLL_EXPORT void tmq_list_destroy(tmq_list_t *); + DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); + DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); + + DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); + DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); + DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); + DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); + DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); + DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); + DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); //Commit the msg’s offset + 1 + DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment); + DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* 
pAssignment);
+  DLL_EXPORT int32_t   tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
+  DLL_EXPORT int64_t   tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);  // The current offset is the offset of the last consumed message + 1
+  DLL_EXPORT int64_t   tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
+
+  DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
+  DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
+  DLL_EXPORT int32_t     tmq_get_vgroup_id(TAOS_RES *res);
+  DLL_EXPORT int64_t     tmq_get_vgroup_offset(TAOS_RES* res);  // Get current offset of the result
+  DLL_EXPORT const char *tmq_err2str(int32_t code);
 ```
-For more information, see [C/C++ Connector](/reference/connector/cpp).
-
 The following example is based on the smart meter table described in Data Models. For complete sample code, see the C language section below.
diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md
index c214e11876..5ebed12b59 100644
--- a/docs/en/12-taos-sql/25-grant.md
+++ b/docs/en/12-taos-sql/25-grant.md
@@ -4,7 +4,7 @@ sidebar_label: Access Control
 description: This document describes how to manage users and permissions in TDengine.
 ---
-This document describes how to manage permissions in TDengine.
+User and access control is a distinguishing feature of the TDengine enterprise edition. This section demonstrates only the most fundamental user and access control functionality; for complete details, please contact the TDengine team.
 ## Create a User
diff --git a/docs/en/14-reference/03-connector/03-cpp.mdx b/docs/en/14-reference/03-connector/03-cpp.mdx
index 13029dbe91..0009902425 100644
--- a/docs/en/14-reference/03-connector/03-cpp.mdx
+++ b/docs/en/14-reference/03-connector/03-cpp.mdx
@@ -378,7 +378,7 @@ In addition to writing data using the SQL method or the parameter binding API, w
 - `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)`
   **Function description**
-  This interface writes the text data of the line protocol to TDengine.
+  - This interface writes the text data of the line protocol to TDengine.
   **Parameter description**
   - taos: database connection, established by the `taos_connect()` function.
@@ -387,12 +387,13 @@ In addition to writing data using the SQL method or the parameter binding API, w
   - protocol: the protocol type of the lines, used to identify the text data format.
   - precision: precision string for the timestamp in the text data.
-  **return value**
-  TAOS_RES structure, application can get error message by using `taos_errstr()` and also error code by using `taos_errno()`.
+  **Return value**
+  - TAOS_RES structure; the application can get the error message by using `taos_errstr()` and the error code by using `taos_errno()`.
   In some cases, the returned TAOS_RES is `NULL`, and it is still possible to call `taos_errno()` to safely get the error code.
   The returned TAOS_RES needs to be freed by the caller to avoid memory leaks.
   **Description**
+
   The protocol type is enumerated and contains the following three formats.
   - TSDB_SML_LINE_PROTOCOL: InfluxDB line protocol (Line Protocol)
@@ -427,3 +428,89 @@ In addition to writing data using the SQL method or the parameter binding API, w
 - The _raw interfaces pass the data through the parameters lines and len, which solves the problem of the original interfaces truncating data that contains '\0'. The totalRows pointer returns the number of parsed data rows.
 - The _ttl interfaces can pass the ttl parameter to control the ttl expiration time of the table.
 - The _reqid interfaces can track the entire call chain by passing the reqid parameter; a minimal usage sketch follows.
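+
+A hedged, minimal sketch of the schemaless call (our own example, not from the official docs; the connection parameters and the pre-created database `test` are assumptions):
+
+```c
+#include <stdio.h>
+#include "taos.h"
+
+int main() {
+  // Assumed connection parameters and an existing database named "test".
+  TAOS *taos = taos_connect("localhost", "root", "taosdata", "test", 6030);
+  if (taos == NULL) return 1;
+
+  // One InfluxDB line-protocol record with a nanosecond timestamp.
+  const char *lines[] = {
+      "meters,location=California.SanFrancisco,groupid=2 "
+      "current=10.3,voltage=219,phase=0.31 1626006833639000000"};
+  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1,
+                                         TSDB_SML_LINE_PROTOCOL,
+                                         TSDB_SML_TIMESTAMP_NANO_SECONDS);
+  if (taos_errno(res) != 0) {
+    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
+  }
+  taos_free_result(res);  // the caller must free TAOS_RES to avoid leaks
+  taos_close(taos);
+  return 0;
+}
+```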
+
+### Subscription API
+
+- `int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment)`
+- `void tmq_free_assignment(tmq_topic_assignment* pAssignment)`
+
+  tmq_topic_assignment is defined as follows:
+  ```c
+  typedef struct tmq_topic_assignment {
+    int32_t vgId;
+    int64_t currentOffset;
+    int64_t begin;
+    int64_t end;
+  } tmq_topic_assignment;
+  ```
+  **Function description**
+  - tmq_get_topic_assignment obtains the information of the vgroups currently assigned to this consumer
+
+  **Parameter description**
+  - numOfAssignment: the number of vgroups assigned to this consumer
+  - assignment: the vgroup information, an array of numOfAssignment entries; must be freed with tmq_free_assignment
+
+  **Return value**
+  - 0 on success, nonzero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
+
+- `int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
+  **Function description**
+  - gets the committed offset of this consumer for the given topic and vgroup
+
+  **Return value**
+  - the committed offset; -2147467247 means that no offset has been committed, and other values less than 0 indicate failure
+
+- `int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg)`
+- `void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param)`
+- `int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
+- `void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param)`
+
+  **Function description**
+
+  The commit interface is divided into two types, each with synchronous and asynchronous variants:
+  - The first type commits based on a message and records the progress carried in that message;
if msg is NULL, the current progress of all vgroups consumed by this consumer is committed: tmq_commit_sync/tmq_commit_async
+  - The second type commits based on the offset of one vgroup in one topic: tmq_commit_offset_sync/tmq_commit_offset_async (a usage sketch combining these calls appears at the end of this section)
+
+  **Parameter description**
+  - msg: the consumed message; if NULL, the current progress of all vgroups consumed by this consumer is committed
+
+  **Return value**
+  - 0 on success, nonzero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
+
+- `int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
+
+  **Function description**
+  - Obtains the current consumption position, which is the position right after the last consumed record
+
+  **Return value**
+  - the current consumption position; values less than 0 indicate failure, and the error message can be obtained through `char *tmq_err2str(int32_t code)`
+
+- `int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
+
+  **Function description**
+  - Sets the consumer's offset in one vgroup of one topic, from which consumption resumes
+
+  **Return value**
+  - 0 on success, nonzero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
+
+- `int64_t tmq_get_vgroup_offset(TAOS_RES* res)`
+
+  **Function description**
+  - Obtains the starting offset of the consumed data
+
+  **Parameter description**
+  - msg: the consumed message
+
+  **Return value**
+  - the starting offset of the consumed data; values less than 0 indicate failure, and the error message can be obtained through `char *tmq_err2str(int32_t code)`
+
+- `int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics)`
+
+  **Function description**
+  - Obtains the list of topics subscribed by the consumer
+
+  **Parameter description**
+  - topics: the list of subscribed topics; must be freed with tmq_list_destroy
+
+  **Return value**
+  - 0 on success, nonzero on failure; the error message can be obtained through `char *tmq_err2str(int32_t code)`
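+
+The following sketch (our own, hedged example; it assumes a `tmq_t *tmq` that is already subscribed to a topic, plus `<stdio.h>` and `<inttypes.h>`) ties these calls together: poll one message, commit it synchronously, then inspect the committed offset and the current position of that vgroup:
+
+```c
+// Hypothetical helper: poll once, commit, and report offsets for the
+// vgroup the message came from.
+static void poll_and_commit_once(tmq_t *tmq) {
+  TAOS_RES *msg = tmq_consumer_poll(tmq, 1000);  // wait up to 1000 ms
+  if (msg == NULL) return;
+
+  const char *topic = tmq_get_topic_name(msg);
+  int32_t vgId  = tmq_get_vgroup_id(msg);
+  int64_t first = tmq_get_vgroup_offset(msg);  // first record in this block
+
+  int32_t code = tmq_commit_sync(tmq, msg);    // commits the message's offset + 1
+  if (code != 0) fprintf(stderr, "commit failed: %s\n", tmq_err2str(code));
+
+  printf("vgId=%d first=%" PRId64 " committed=%" PRId64 " position=%" PRId64 "\n",
+         vgId, first, tmq_committed(tmq, topic, vgId), tmq_position(tmq, topic, vgId));
+  taos_free_result(msg);
+}
+```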
\ No newline at end of file
diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx
index 69bbd287ed..b12ef38ea8 100644
--- a/docs/en/14-reference/03-connector/04-java.mdx
+++ b/docs/en/14-reference/03-connector/04-java.mdx
@@ -1091,8 +1091,6 @@ public abstract class ConsumerLoop {
             config.setProperty("client.id", "1");
             config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
             config.setProperty("value.deserializer.encoding", "UTF-8");
-            config.setProperty("experimental.snapshot.enable", "true");
-
             this.consumer = new TaosConsumer<>(config);
             this.topics = Collections.singletonList("topic_speed");
@@ -1176,7 +1174,6 @@ public abstract class ConsumerLoop {
             config.setProperty("client.id", "1");
             config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
             config.setProperty("value.deserializer.encoding", "UTF-8");
-            config.setProperty("experimental.snapshot.enable", "true");
             this.consumer = new TaosConsumer<>(config);
             this.topics = Collections.singletonList("topic_speed");
diff --git a/docs/en/14-reference/08-taos-shell.md b/docs/en/14-reference/08-taos-shell.md
index e66da7ec17..7e0433a8b2 100644
--- a/docs/en/14-reference/08-taos-shell.md
+++ b/docs/en/14-reference/08-taos-shell.md
@@ -81,6 +81,14 @@ For example:
 taos -h h1.taos.com -s "use db; show tables;"
 ```
+## Export query results to a file
+
+- You can use ">>" to export the query results to a file; the syntax is `select * from table >> file`. If only a file name is given without a path, the file is generated in the current working directory of the TDengine CLI.
+
+## Import data from CSV file
+
+- You can use `insert into table_name file 'fileName'` to import the data from the specified file into the specified table. For example, `insert into d0 file '/root/d0.csv';` imports the data in the file "/root/d0.csv" into the table "d0". If only a file name is given without a path, the file is expected to be in the current working directory of the TDengine CLI.
+
 ## TDengine CLI tips
 - You can use the up and down keys to iterate the history of commands entered
@@ -89,3 +97,5 @@ taos -h h1.taos.com -s "use db; show tables;"
 - Execute `RESET QUERY CACHE` to clear the local cache of the table schema
 - Execute SQL statements in batches. You can store a series of shell commands (ending with ;, one line for each SQL command) in a script file and execute the command `source ` in the TDengine CLI to execute all SQL commands in that file automatically
 - Enter `q` to exit TDengine CLI
+
+
diff --git a/docs/examples/c/tmq_example.c b/docs/examples/c/tmq_example.c
index d958428b8f..ca7c23e93b 100644
--- a/docs/examples/c/tmq_example.c
+++ b/docs/examples/c/tmq_example.c
@@ -227,12 +227,6 @@ tmq_t* build_consumer() {
     return NULL;
   }
-  code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
-  if (TMQ_CONF_OK != code) {
-    tmq_conf_destroy(conf);
-    return NULL;
-  }
-
   tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
   tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
diff --git a/docs/examples/go/sub/main.go b/docs/examples/go/sub/main.go
index cb24e351ab..ed335cfdea 100644
--- a/docs/examples/go/sub/main.go
+++ b/docs/examples/go/sub/main.go
@@ -35,7 +35,6 @@ func main() {
 		"td.connect.port":              "6030",
 		"client.id":                    "test_tmq_client",
 		"enable.auto.commit":           "false",
-		"experimental.snapshot.enable": "true",
 		"msg.with.table.name":          "true",
 	})
 	if err != nil {
diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx
index 38b91d7cea..6852d5551e 100644
--- a/docs/zh/07-develop/07-tmq.mdx
+++ b/docs/zh/07-develop/07-tmq.mdx
@@ -25,7 +25,20 @@ import CDemo from "./_sub_c.mdx";
 本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。
-注意:数据订阅是从 WAL 消费数据,如果一些 WAL 文件被基于 WAL 保留策略删除,则已经删除的 WAL 文件中的数据就无法再消费到。需要根据业务需要在创建数据库时合理设置 `WAL_RETENTION_PERIOD` 或 `WAL_RETENTION_SIZE` ,并确保应用及时消费数据,这样才不会产生数据丢失的现象。数据订阅的行为与 Kafka 等广泛使用的消息队列类产品的行为相似。
+说明(以c接口为例):
+1. 一个消费组消费同一个topic下的所有数据,不同消费组之间相互独立;
+2. 一个消费组消费同一个topic所有的vgroup,消费组可由多个消费者组成,但一个vgroup仅被一个消费者消费,如果消费者数量超过了vgroup数量,多余的消费者不消费数据;
+3. 在服务端每个vgroup仅保存一个offset,每个vgroup的offset是单调递增的,但不一定连续。各个vgroup的offset之间没有关联;
+4. 每次poll服务端会返回一个结果block,该block属于一个vgroup,可能包含多个wal版本的数据,可以通过 tmq_get_vgroup_offset 接口获得是该block第一条记录的offset;
+5. 一个消费组如果从未commit过offset,当其成员消费者重启重新拉取数据时,均从参数auto.offset.reset设定值开始消费;在一个消费者生命周期中,客户端本地记录了最近一次拉取数据的offset,不会拉取重复数据;
+6. 消费者如果异常终止(没有调用tmq_close),需等约12秒后触发其所属消费组rebalance,该消费者在服务端状态变为LOST,约1天后该消费者自动被删除;正常退出,退出后就会删除消费者;新增消费者,需等约2秒触发rebalance,该消费者在服务端状态变为ready;
+7. 消费组rebalance会对该组所有ready状态的消费者成员重新进行vgroup分配,消费者仅能对自己负责的vgroup进行assignment/seek/commit/poll操作;
+8. 消费者可利用 tmq_position 获得当前消费的offset,并seek到指定offset,重新消费;
+9. seek将position指向指定offset,不执行commit操作,一旦seek成功,可poll拉取指定offset及以后的数据;
+10. seek 操作之前须调用 tmq_get_topic_assignment 接口获取该consumer的vgroup ID和offset范围。seek 操作会检测vgroup ID 和 offset是否合法,如非法将报错;
+11. tmq_get_vgroup_offset接口获取的是记录所在结果block块里的第一条数据的offset,当seek至该offset时,将消费到这个block里的全部数据。参见第四点;
+12. 
由于存在 WAL 过期删除机制,即使seek 操作成功,poll数据时有可能offset已失效。如果poll 的offset 小于 WAL 最小版本号,将会从WAL最小版本号消费; +13. 数据订阅是从 WAL 消费数据,如果一些 WAL 文件被基于 WAL 保留策略删除,则已经删除的 WAL 文件中的数据就无法再消费到。需要根据业务需要在创建数据库时合理设置 `WAL_RETENTION_PERIOD` 或 `WAL_RETENTION_SIZE` ,并确保应用及时消费数据,这样才不会产生数据丢失的现象。数据订阅的行为与 Kafka 等广泛使用的消息队列类产品的行为相似; ## 主要数据结构和 API @@ -35,39 +48,60 @@ import CDemo from "./_sub_c.mdx"; ```c -typedef struct tmq_t tmq_t; -typedef struct tmq_conf_t tmq_conf_t; -typedef struct tmq_list_t tmq_list_t; + typedef struct tmq_t tmq_t; + typedef struct tmq_conf_t tmq_conf_t; + typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); + typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); -DLL_EXPORT tmq_list_t *tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); -DLL_EXPORT const char *tmq_err2str(int32_t code); + typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +} tmq_conf_res_t; -DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); -DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); -DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); -DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); -DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); -DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + typedef struct tmq_topic_assignment { + int32_t vgId; + int64_t currentOffset; + int64_t begin; + int64_t end; +} tmq_topic_assignment; -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; -typedef enum tmq_conf_res_t tmq_conf_res_t; + DLL_EXPORT tmq_conf_t *tmq_conf_new(); + DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); + DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); + DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + DLL_EXPORT tmq_list_t *tmq_list_new(); + DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); + DLL_EXPORT void tmq_list_destroy(tmq_list_t *); + DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); + DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); + + DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); + DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); + DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); + DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); + DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); + DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); + DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); + DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t 
tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment);
+  DLL_EXPORT void      tmq_free_assignment(tmq_topic_assignment* pAssignment);
+  DLL_EXPORT int32_t   tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset);
+  DLL_EXPORT int64_t   tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId);
+  DLL_EXPORT int64_t   tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId);
+
+  DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res);
+  DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res);
+  DLL_EXPORT int32_t     tmq_get_vgroup_id(TAOS_RES *res);
+  DLL_EXPORT int64_t     tmq_get_vgroup_offset(TAOS_RES* res);
+  DLL_EXPORT const char *tmq_err2str(int32_t code);
 ```
-这些 API 的文档请见 [C/C++ Connector](../../connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
+下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
diff --git a/docs/zh/08-connector/10-cpp.mdx b/docs/zh/08-connector/10-cpp.mdx
index 9c5095f09c..c0723cd85c 100644
--- a/docs/zh/08-connector/10-cpp.mdx
+++ b/docs/zh/08-connector/10-cpp.mdx
@@ -467,21 +467,22 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
 - `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)`
   **功能说明**
-  该接口将行协议的文本数据写入到 TDengine 中。
+  - 该接口将行协议的文本数据写入到 TDengine 中。
   **参数说明**
-  - taos: 数据库连接,通过 `taos_connect()` 函数建立的数据库连接。
-  - lines:文本数据。满足解析格式要求的无模式文本字符串。
-  - numLines:文本数据的行数,不能为 0 。
-  - protocol: 行协议类型,用于标识文本数据格式。
-  - precision:文本数据中的时间戳精度字符串。
+  - taos: 数据库连接,通过 `taos_connect()` 函数建立的数据库连接。
+  - lines:文本数据。满足解析格式要求的无模式文本字符串。
+  - numLines:文本数据的行数,不能为 0 。
+  - protocol: 行协议类型,用于标识文本数据格式。
+  - precision:文本数据中的时间戳精度字符串。
   **返回值**
-  TAOS_RES 结构体,应用可以通过使用 `taos_errstr()` 获得错误信息,也可以使用 `taos_errno()` 获得错误码。
+  - TAOS_RES 结构体,应用可以通过使用 `taos_errstr()` 获得错误信息,也可以使用 `taos_errno()` 获得错误码。
   在某些情况下,返回的 TAOS_RES 为 `NULL`,此时仍然可以调用 `taos_errno()` 来安全地获得错误码信息。
   返回的 TAOS_RES 需要调用方来负责释放,否则会出现内存泄漏。
   **说明**
+
   协议类型是枚举类型,包含以下三种格式:
   - TSDB_SML_LINE_PROTOCOL:InfluxDB 行协议(Line Protocol)
@@ -515,3 +516,90 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
 - 带_raw的接口通过传递的参数lines指针和长度len来表示数据,为了解决原始接口数据包含'\0'而被截断的问题。totalRows指针返回解析出来的数据行数。
 - 带_ttl的接口可以传递ttl参数来控制建表的ttl到期时间。
 - 带_reqid的接口可以通过传递reqid参数来追踪整个的调用链。
+
+### 数据订阅 API
+
+- `int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment)`
+- `void tmq_free_assignment(tmq_topic_assignment* pAssignment)`
+
+  tmq_topic_assignment结构体定义如下:
+  ```c
+  typedef struct tmq_topic_assignment {
+    int32_t vgId;
+    int64_t currentOffset;
+    int64_t begin;
+    int64_t end;
+  } tmq_topic_assignment;
+  ```
+  **功能说明**
+  - tmq_get_topic_assignment 接口返回当前consumer分配的vgroup的信息,每个vgroup的信息包括vgId,wal的最大最小offset,以及当前消费到的offset。
+
+  **参数说明**
+  - numOfAssignment :分配给该consumer有效的vgroup个数。
+  - assignment :分配的信息,数据大小为numOfAssignment,需要通过 tmq_free_assignment 接口释放。
+
+  **返回值**
+  - 错误码,0成功,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息。
+
+- `int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
+  **功能说明**
+  - 获取当前 consumer 在某个 topic 和 vgroup上的 commit 位置。
+
+  **返回值**
+  - 当前commit的位置,-2147467247表示没有消费进度,其他小于0的值表示失败,错误码就是返回值
+
+- `int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg)`
+- `void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param)`
+- `int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
+- `void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param)`
+
+  **功能说明**
+
+  commit接口分为两种类型,每种类型有同步和异步接口:
+  - 第一种类型:根据消息提交,提交消息里的进度,如果消息传NULL,提交当前consumer所有消费的vgroup的当前进度 : tmq_commit_sync/tmq_commit_async
+  - 第二种类型:根据某个topic的某个vgroup的offset提交 : tmq_commit_offset_sync/tmq_commit_offset_async
+
+  **参数说明**
+  - msg:消费到的消息结构,如果msg传NULL,提交当前consumer所有消费的vgroup的当前进度
+
+  **返回值**
+  - 错误码,0成功,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息
+
+- `int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId)`
+
+  **功能说明**
+  - 获取当前消费位置,为消费到的数据位置的下一个位置
+
+  **返回值**
+  - 消费位置,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息
+
+- `int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)`
+
+  **功能说明**
+  - 设置 consumer 在某个topic的某个vgroup的 offset位置,开始消费
+
+  **返回值**
+  - 错误码,0成功,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息
+
+- `int64_t tmq_get_vgroup_offset(TAOS_RES* res)`
+
+  **功能说明**
+
+  获取 poll 消费到的数据的起始offset
+
+  **参数说明**
+  - msg:消费到的消息结构
+
+  **返回值**
+  - 消费到的offset,小于0的值表示失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息
+
+- `int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics)`
+
+  **功能说明**
+
+  获取消费者订阅的 topic 列表
+  **参数说明**
+  - topics: 获取的 topic 列表存储在这个结构中,接口内分配内存,需调用tmq_list_destroy释放
+
+  **返回值**
+  - 错误码,0成功,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息
\ No newline at end of file
diff --git a/docs/zh/12-taos-sql/25-grant.md b/docs/zh/12-taos-sql/25-grant.md
index a9c3910500..d53f951e67 100644
--- a/docs/zh/12-taos-sql/25-grant.md
+++ b/docs/zh/12-taos-sql/25-grant.md
@@ -4,7 +4,7 @@ title: 权限管理
 description: 企业版中才具有的权限管理功能
 ---
-本节讲述如何在 TDengine 中进行权限管理的相关操作。
+本节讲述如何在 TDengine 中进行权限管理的相关操作。权限管理是 TDengine 企业版的特有功能,本节只列举了一些基本的权限管理功能作为示例,更丰富的权限管理请联系 TDengine 销售或市场团队。
 ## 创建用户
diff --git a/docs/zh/14-reference/08-taos-shell.md b/docs/zh/14-reference/08-taos-shell.md
index 3423cf35bb..1caa580b73 100644
--- a/docs/zh/14-reference/08-taos-shell.md
+++ b/docs/zh/14-reference/08-taos-shell.md
@@ -89,3 +89,11 @@ taos -h h1.taos.com -s "use db; show tables;"
 - 执行 `RESET QUERY CACHE` 可清除本地表 Schema 的缓存
 - 批量执行 SQL 语句。可以将一系列的 TDengine CLI 命令(以英文 ; 结尾,每个 SQL 语句为一行)按行存放在文件里,在 TDengine CLI 里执行命令 `source ` 自动执行该文件里所有的 SQL 语句
 - 输入 `q` 或 `quit` 或 `exit` 回车,可以退出 TDengine CLI
+
+## TDengine CLI 导出查询结果到文件中
+
+- 可以使用符号 “>>” 导出查询结果到某个文件中,语法为: sql 查询语句 >> ‘输出文件名’; 输出文件如果不写路径的话,将输出至当前目录下。如 select * from d0 >> ‘/root/d0.csv’; 将把查询结果输出到 /root/d0.csv 中。
+
+## TDengine CLI 导入文件中的数据到表中
+
+- 可以使用 insert into table_name file '输入文件名',把上一步中导出的数据文件再导入到指定表中。如 insert into d0 file '/root/d0.csv'; 表示把上面导出的数据全部再导入至 d0 表中。
diff --git a/examples/c/tmq.c b/examples/c/tmq.c
index e1133c109e..136c54b874 100644
--- a/examples/c/tmq.c
+++ b/examples/c/tmq.c
@@ -228,11 +228,6 @@ tmq_t* build_consumer() {
     tmq_conf_destroy(conf);
     return NULL;
   }
-  code = tmq_conf_set(conf, "experimental.snapshot.enable", "false");
-  if (TMQ_CONF_OK != code) {
-    tmq_conf_destroy(conf);
-    return NULL;
-  }
   tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
   tmq = tmq_consumer_new(conf, NULL, 0);
diff --git a/include/client/taos.h b/include/client/taos.h
index 3cc2d907ab..5b7946c9ad 100644
--- a/include/client/taos.h
+++ b/include/client/taos.h
@@ -260,19 +260,14 @@ typedef struct tmq_t      tmq_t;
 typedef struct tmq_conf_t tmq_conf_t;
 typedef struct tmq_list_t tmq_list_t;
-typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param));
+typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param));
-DLL_EXPORT tmq_list_t 
*tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); -DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); +typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +} tmq_conf_res_t; -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); - -DLL_EXPORT const char *tmq_err2str(int32_t code); - -/* ------------------------TMQ CONSUMER INTERFACE------------------------ */ typedef struct tmq_topic_assignment { int32_t vgId; int64_t currentOffset; @@ -280,43 +275,38 @@ typedef struct tmq_topic_assignment { int64_t end; } tmq_topic_assignment; -DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); -DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); -DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); -DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); -DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); -DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); -DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); -DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); -DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); -DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, - int32_t *numOfAssignment); -DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment); -DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); - -DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); -DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res); -DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); -DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); -DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); -DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); - -/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */ - -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; - -typedef enum tmq_conf_res_t tmq_conf_res_t; - DLL_EXPORT tmq_conf_t *tmq_conf_new(); DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); -/* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */ +DLL_EXPORT tmq_list_t *tmq_list_new(); +DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); +DLL_EXPORT void tmq_list_destroy(tmq_list_t *); +DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); +DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); + +DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); +DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); +DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); +DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); +DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); +DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); +DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES 
*msg); //Commit the msg’s offset + 1 +DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); +DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); +DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); +DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment); +DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment); +DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); +DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1 +DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); + +DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); +DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res); +DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); +DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); +DLL_EXPORT const char *tmq_err2str(int32_t code); /* ------------------------------ TAOSX -----------------------------------*/ // note: following apis are unstable diff --git a/include/common/tcommon.h b/include/common/tcommon.h index bdfb1d32b4..8482ba8a78 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -152,6 +152,8 @@ enum { STREAM_INPUT__DATA_RETRIEVE, STREAM_INPUT__GET_RES, STREAM_INPUT__CHECKPOINT, + STREAM_INPUT__CHECKPOINT_TRIGGER, + STREAM_INPUT__TRANS_STATE, STREAM_INPUT__REF_DATA_BLOCK, STREAM_INPUT__DESTROY, }; @@ -168,7 +170,9 @@ typedef enum EStreamType { STREAM_PULL_DATA, STREAM_PULL_OVER, STREAM_FILL_OVER, + STREAM_CHECKPOINT, STREAM_CREATE_CHILD_TABLE, + STREAM_TRANS_STATE, } EStreamType; #pragma pack(push, 1) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 0d3852cbab..5fd174e873 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -199,6 +199,7 @@ extern bool tsFilterScalarMode; extern int32_t tsKeepTimeOffset; extern int32_t tsMaxStreamBackendCache; extern int32_t tsPQSortMemThreshold; +extern int32_t tsResolveFQDNRetryTime; // #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 232551007d..60172bce3d 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -254,7 +254,6 @@ enum { TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_SCAN_HISTORY, "stream-scan-history", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_SCAN_HISTORY_FINISH, "stream-scan-history-finish", NULL, NULL) - TD_DEF_MSG_TYPE(TDMT_STREAM_TRANSFER_STATE, "stream-transfer-state", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECK, "stream-task-check", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECKPOINT, "stream-checkpoint", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_REPORT_CHECKPOINT, "stream-report-checkpoint", NULL, NULL) diff --git a/include/libs/executor/storageapi.h b/include/libs/executor/storageapi.h index 773f373a2d..724d6638db 100644 --- a/include/libs/executor/storageapi.h +++ b/include/libs/executor/storageapi.h @@ -98,6 +98,16 @@ typedef struct SMTbCursor { int8_t paused; } SMTbCursor; +typedef struct SMCtbCursor { + SMeta *pMeta; + void *pCur; + tb_uid_t suid; + void *pKey; + void *pVal; + int kLen; + int vLen; 
+} SMCtbCursor; + typedef struct SRowBuffPos { void* pRowBuff; void* pKey; @@ -278,13 +288,15 @@ typedef struct SStoreMeta { void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables, int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) & // metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta); - int64_t (*getNumOfRowsInMem)(void* pVnode); /** int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list); int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg); int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list); */ + SMCtbCursor* (*openCtbCursor)(void *pVnode, tb_uid_t uid, int lock); + void (*closeCtbCursor)(SMCtbCursor *pCtbCur, int lock); + tb_uid_t (*ctbCursorNext)(SMCtbCursor* pCur); } SStoreMeta; typedef struct SStoreMetaReader { diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h index 402b8f0309..eebb69e6ba 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -157,6 +157,8 @@ typedef enum EFunctionType { FUNCTION_TYPE_AVG_MERGE, FUNCTION_TYPE_STDDEV_PARTIAL, FUNCTION_TYPE_STDDEV_MERGE, + FUNCTION_TYPE_IRATE_PARTIAL, + FUNCTION_TYPE_IRATE_MERGE, // geometry functions FUNCTION_TYPE_GEOM_FROM_TEXT = 4250, diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 2319643b09..8eeeff4148 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -169,7 +169,7 @@ typedef enum ENodeType { QUERY_NODE_REVOKE_STMT, QUERY_NODE_SHOW_DNODES_STMT, QUERY_NODE_SHOW_MNODES_STMT, - QUERY_NODE_SHOW_MODULES_STMT, +// QUERY_NODE_SHOW_MODULES_STMT, QUERY_NODE_SHOW_QNODES_STMT, QUERY_NODE_SHOW_SNODES_STMT, QUERY_NODE_SHOW_BNODES_STMT, diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 063318332a..b4f0a67fa0 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -107,6 +107,7 @@ typedef struct SScanLogicNode { bool sortPrimaryKey; bool igLastNull; bool groupOrderScan; + bool onlyMetaCtbIdx; // for tag scan with no tbname } SScanLogicNode; typedef struct SJoinLogicNode { @@ -334,7 +335,11 @@ typedef struct SScanPhysiNode { bool groupOrderScan; } SScanPhysiNode; -typedef SScanPhysiNode STagScanPhysiNode; +typedef struct STagScanPhysiNode { + SScanPhysiNode scan; + bool onlyMetaCtbIdx; //no tbname, tag index not used. 
+} STagScanPhysiNode;
+
 typedef SScanPhysiNode SBlockDistScanPhysiNode;
 typedef struct SLastRowScanPhysiNode {
@@ -603,6 +608,8 @@ typedef struct SSubplan {
   SNode*        pTagCond;
   SNode*        pTagIndexCond;
   bool          showRewrite;
+  int32_t       rowsThreshold;
+  bool          dynamicRowThreshold;
 } SSubplan;
 typedef enum EExplainMode { EXPLAIN_MODE_DISABLE = 1, EXPLAIN_MODE_STATIC, EXPLAIN_MODE_ANALYZE } EExplainMode;
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index b9b24917f3..02bb65b762 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -122,6 +122,7 @@ typedef struct {
   int8_t  type;
   int32_t srcVgId;
+  int32_t srcTaskId;
   int32_t childId;
   int64_t sourceVer;
   int64_t reqId;
@@ -251,6 +252,7 @@ typedef struct SStreamChildEpInfo {
   int32_t nodeId;
   int32_t childId;
   int32_t taskId;
+  int8_t  dataAllowed;
   SEpSet  epSet;
 } SStreamChildEpInfo;
@@ -272,6 +274,7 @@ typedef struct SStreamStatus {
   int8_t schedStatus;
   int8_t keepTaskStatus;
   bool   transferState;
+  bool   appendTranstateBlock;  // the trans-state data block has already been appended; todo: remove it
   int8_t timerActive;           // timer is active
   int8_t pauseAllowed;          // allowed task status to be set to be paused
 } SStreamStatus;
@@ -399,8 +402,9 @@ typedef struct {
 typedef struct {
   int64_t streamId;
+  int32_t type;
   int32_t taskId;
-  int32_t dataSrcVgId;
+  int32_t srcVgId;
   int32_t upstreamTaskId;
   int32_t upstreamChildId;
   int32_t upstreamNodeId;
@@ -570,8 +574,6 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
 int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
 void    tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq);
-int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTask* pTask, int32_t vgId, int32_t numOfBlocks,
-                               int64_t dstTaskId);
 void    tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
 int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
@@ -579,6 +581,8 @@ int32_t streamSetupScheduleTrigger(SStreamTask* pTask);
 int32_t streamProcessRunReq(SStreamTask* pTask);
 int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec);
 int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code);
+void    streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId);
+void    streamTaskOpenAllUpstreamInput(SStreamTask* pTask);
 int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg);
@@ -589,7 +593,6 @@ int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock
 bool    streamTaskShouldStop(const SStreamStatus* pStatus);
 bool    streamTaskShouldPause(const SStreamStatus* pStatus);
 bool    streamTaskIsIdle(const SStreamTask* pTask);
-int32_t streamTaskEndScanWAL(SStreamTask* pTask);
 SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId);
 int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize);
@@ -626,7 +629,7 @@ int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* p
 int32_t streamSourceScanHistoryData(SStreamTask* pTask);
 int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask);
-int32_t streamDispatchTransferStateMsg(SStreamTask* pTask);
+int32_t appendTranstateIntoInputQ(SStreamTask* pTask);
 // agg level
 int32_t streamTaskScanHistoryPrepare(SStreamTask* pTask);
diff --git a/include/os/osFile.h b/include/os/osFile.h
index 0e93002706..da1f8f8b57 100644
--- a/include/os/osFile.h
+++ b/include/os/osFile.h
@@ -76,7 +76,7 @@ int32_t taosUnLockFile(TdFilePtr pFile);
int32_t taosUmaskFile(int32_t maskVal); -int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime); +int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime); int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno); int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime); bool taosCheckExistFile(const char *pathname); diff --git a/include/os/osRand.h b/include/os/osRand.h index 27d07e8c6f..5d907bba15 100644 --- a/include/os/osRand.h +++ b/include/os/osRand.h @@ -32,6 +32,8 @@ void taosSeedRand(uint32_t seed); uint32_t taosRand(void); uint32_t taosRandR(uint32_t* pSeed); void taosRandStr(char* str, int32_t size); +void taosRandStr2(char* str, int32_t size); + uint32_t taosSafeRand(void); #ifdef __cplusplus diff --git a/include/util/taoserror.h b/include/util/taoserror.h index b43985074c..a5081f2c7d 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -191,6 +191,7 @@ int32_t* taosGetErrno(); // #define TSDB_CODE_MND_FAILED_TO_CREATE_DIR TAOS_DEF_ERROR_CODE(0, 0x0313) // 2.x // #define TSDB_CODE_MND_FAILED_TO_INIT_STEP TAOS_DEF_ERROR_CODE(0, 0x0314) // 2.x #define TSDB_CODE_MND_USER_DISABLED TAOS_DEF_ERROR_CODE(0, 0x0315) +#define TSDB_CODE_MND_INVALID_PLATFORM TAOS_DEF_ERROR_CODE(0, 0x0316) // mnode-sdb #define TSDB_CODE_SDB_OBJ_ALREADY_THERE TAOS_DEF_ERROR_CODE(0, 0x0320) // internal diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt index d0cfd38fb9..e45ee9c932 100644 --- a/source/client/CMakeLists.txt +++ b/source/client/CMakeLists.txt @@ -16,7 +16,7 @@ target_include_directories( target_link_libraries( taos INTERFACE api - PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom + PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom geometry ) if(TD_DARWIN_ARM64) @@ -57,7 +57,7 @@ target_include_directories( target_link_libraries( taos_static INTERFACE api - PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom + PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom geometry ) if(${BUILD_TEST}) diff --git a/source/client/inc/clientSml.h b/source/client/inc/clientSml.h index 040064560c..1839c14894 100644 --- a/source/client/inc/clientSml.h +++ b/source/client/inc/clientSml.h @@ -33,6 +33,7 @@ extern "C" { #include "ttime.h" #include "ttypes.h" #include "cJSON.h" +#include "geosWrapper.h" #if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__) # define expect(expr,value) (__builtin_expect ((expr),(value)) ) @@ -192,7 +193,7 @@ typedef struct { // SArray *preLineTagKV; SArray *maxTagKVs; - SArray *masColKVs; + SArray *maxColKVs; SSmlLineInfo preLine; STableMeta *currSTableMeta; diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 266368e9bc..a876ff9455 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -1073,6 +1073,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { return 0; end: + taosHashCancelIterate(info->superTables, tmp); taosHashCleanup(hashTmp); taosMemoryFreeClear(pTableMeta); catalogRefreshTableMeta(info->pCatalog, &conn, &pName, 1); @@ -1191,6 +1192,7 @@ void freeSSmlKv(void *data) { SSmlKv *kv = (SSmlKv *)data; if (kv->keyEscaped) taosMemoryFree((void *)(kv->key)); if (kv->valueEscaped) taosMemoryFree((void *)(kv->value)); + if (kv->type == TSDB_DATA_TYPE_GEOMETRY) 
geosFreeBuffer((void *)(kv->value)); } void smlDestroyInfo(SSmlHandle *info) { @@ -1433,6 +1435,7 @@ static int32_t smlInsertData(SSmlHandle *info) { code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE); if(code != TSDB_CODE_SUCCESS){ taosMemoryFree(measure); + taosHashCancelIterate(info->childTables, oneTable); return code; } @@ -1441,6 +1444,7 @@ static int32_t smlInsertData(SSmlHandle *info) { if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " catalogGetTableHashVgroup failed. table name: %s", info->id, tableData->childTableName); taosMemoryFree(measure); + taosHashCancelIterate(info->childTables, oneTable); return code; } taosHashPut(info->pVgHash, (const char *)&vg.vgId, sizeof(vg.vgId), (char *)&vg, sizeof(vg)); @@ -1450,6 +1454,7 @@ static int32_t smlInsertData(SSmlHandle *info) { if (unlikely(NULL == pMeta || NULL == (*pMeta)->tableMeta)) { uError("SML:0x%" PRIx64 " NULL == pMeta. table name: %s", info->id, tableData->childTableName); taosMemoryFree(measure); + taosHashCancelIterate(info->childTables, oneTable); return TSDB_CODE_SML_INTERNAL_ERROR; } @@ -1465,6 +1470,7 @@ static int32_t smlInsertData(SSmlHandle *info) { taosMemoryFree(measure); if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " smlBindData failed", info->id); + taosHashCancelIterate(info->childTables, oneTable); return code; } oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, oneTable); diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index 90869d6148..73622a11cc 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -102,6 +102,30 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { return TSDB_CODE_TSC_INVALID_VALUE; } + if (pVal->value[0] == 'g' || pVal->value[0] == 'G') { // geometry + if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= sizeof("POINT")+3) { + int32_t code = initCtxGeomFromText(); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + char* tmp = taosMemoryCalloc(pVal->length, 1); + memcpy(tmp, pVal->value + 2, pVal->length - 3); + code = doGeomFromText(tmp, (unsigned char **)&pVal->value, &pVal->length); + taosMemoryFree(tmp); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + pVal->type = TSDB_DATA_TYPE_GEOMETRY; + if (pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { + geosFreeBuffer((void*)(pVal->value)); + return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; + } + return TSDB_CODE_SUCCESS; + } + return TSDB_CODE_TSC_INVALID_VALUE; + } + if (pVal->value[0] == 't' || pVal->value[0] == 'T') { if (pVal->length == 1 || (pVal->length == 4 && (pVal->value[1] == 'r' || pVal->value[1] == 'R') && @@ -390,14 +414,14 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin SSmlKv kv = {.key = tag->name, .keyLen = strlen(tag->name), .type = tag->type}; if (tag->type == TSDB_DATA_TYPE_NCHAR) { kv.length = (tag->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; - } else if (tag->type == TSDB_DATA_TYPE_BINARY || tag->type == TSDB_DATA_TYPE_VARBINARY) { + } else if (tag->type == TSDB_DATA_TYPE_BINARY || tag->type == TSDB_DATA_TYPE_GEOMETRY || tag->type == TSDB_DATA_TYPE_VARBINARY) { kv.length = tag->bytes - VARSTR_HEADER_SIZE; } taosArrayPush((*tmp)->cols, &kv); } } info->currSTableMeta = (*tmp)->tableMeta; - info->masColKVs = (*tmp)->cols; + info->maxColKVs = (*tmp)->cols; } } @@ -512,13 +536,13 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin freeSSmlKv(&kv); return TSDB_CODE_SUCCESS; } - if 
(cnt >= taosArrayGetSize(info->masColKVs)) { + if (cnt >= taosArrayGetSize(info->maxColKVs)) { info->dataFormat = false; info->reRun = true; freeSSmlKv(&kv); return TSDB_CODE_SUCCESS; } - SSmlKv *maxKV = (SSmlKv *)taosArrayGet(info->masColKVs, cnt); + SSmlKv *maxKV = (SSmlKv *)taosArrayGet(info->maxColKVs, cnt); if (kv.type != maxKV->type) { info->dataFormat = false; info->reRun = true; @@ -663,14 +687,15 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine if (info->dataFormat) { uDebug("SML:0x%" PRIx64 " smlParseInfluxString format true, ts:%" PRId64, info->id, ts); ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 0); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - ret = smlBuildRow(info->currTableDataCtx); - if (ret != TSDB_CODE_SUCCESS) { - return ret; + if (ret == TSDB_CODE_SUCCESS) { + ret = smlBuildRow(info->currTableDataCtx); } + clearColValArray(info->currTableDataCtx->pValues); + if (unlikely(ret != TSDB_CODE_SUCCESS)) { + smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL); + return ret; + } } else { uDebug("SML:0x%" PRIx64 " smlParseInfluxString format false, ts:%" PRId64, info->id, ts); taosArraySet(elements->colArray, 0, &kv); diff --git a/source/client/test/CMakeLists.txt b/source/client/test/CMakeLists.txt index 34c377c6ea..91f0d1eef8 100644 --- a/source/client/test/CMakeLists.txt +++ b/source/client/test/CMakeLists.txt @@ -20,7 +20,7 @@ TARGET_LINK_LIBRARIES( ADD_EXECUTABLE(smlTest smlTest.cpp) TARGET_LINK_LIBRARIES( smlTest - PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom + PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom geometry ) TARGET_INCLUDE_DIRECTORIES( diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt index 356ea2be1c..b010467f20 100644 --- a/source/common/CMakeLists.txt +++ b/source/common/CMakeLists.txt @@ -16,6 +16,14 @@ ENDIF () IF (TD_STORAGE) ADD_DEFINITIONS(-D_STORAGE) TARGET_LINK_LIBRARIES(common PRIVATE storage) + + IF(${TD_LINUX}) + IF(${BUILD_WITH_COS}) + add_definitions(-DUSE_COS) + ENDIF(${BUILD_WITH_COS}) + + ENDIF(${TD_LINUX}) + ENDIF () target_include_directories( diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 0940fcef6a..eb8042099f 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -314,7 +314,7 @@ static const SSysDbTableSchema userUserPrivilegesSchema[] = { static const SSysTableMeta infosMeta[] = { {TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true}, {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true}, - {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true}, + // {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true}, {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema), true}, {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema), true}, {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema), true}, diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index efbfa6e4e6..a517193a47 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -26,7 +26,7 @@ int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRo if (pColumnInfoData->reassigned) { int32_t totalSize = 0; for (int32_t row = 0; row < numOfRows; ++row) { - char* pColData = pColumnInfoData->pData + pColumnInfoData->varmeta.offset[row]; + char* pColData = pColumnInfoData->pData + 
pColumnInfoData->varmeta.offset[row]; int32_t colSize = 0; if (pColumnInfoData->info.type == TSDB_DATA_TYPE_JSON) { colSize = getJsonValueLen(pColData); @@ -142,7 +142,8 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const return 0; } -int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData) { +int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, + const char* pData) { int32_t type = pColumnInfoData->info.type; if (IS_VAR_DATA_TYPE(type)) { int32_t dataLen = 0; @@ -164,7 +165,6 @@ int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, return 0; } - static int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) { if (!IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { return TSDB_CODE_SUCCESS; @@ -188,16 +188,17 @@ static int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) } static int32_t doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t currentRow, const char* pData, - int32_t itemLen, int32_t numOfRows, bool trimValue) { + int32_t itemLen, int32_t numOfRows, bool trimValue) { if (pColumnInfoData->info.bytes < itemLen) { - uWarn("column/tag actual data len %d is bigger than schema len %d, trim it:%d", itemLen, pColumnInfoData->info.bytes, trimValue); + uWarn("column/tag actual data len %d is bigger than schema len %d, trim it:%d", itemLen, + pColumnInfoData->info.bytes, trimValue); if (trimValue) { itemLen = pColumnInfoData->info.bytes; } else { return TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER; } } - + size_t start = 1; // the first item @@ -230,8 +231,8 @@ static int32_t doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t cur return TSDB_CODE_SUCCESS; } -int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, - uint32_t numOfRows, bool trimValue) { +int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows, + bool trimValue) { int32_t len = pColumnInfoData->info.bytes; if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { len = varDataTLen(pData); @@ -262,7 +263,7 @@ static void doBitmapMerge(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, c uint8_t* p = (uint8_t*)pSource->nullbitmap; pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] &= (0B11111111 << shiftBits); // clear remind bits - pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] |= (p[0] >> remindBits); // copy remind bits + pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] |= (p[0] >> remindBits); // copy remind bits if (BitmapLen(numOfRow1) == BitmapLen(total)) { return; @@ -350,7 +351,7 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int pColumnInfoData->pData = tmp; if (BitmapLen(numOfRow1) < BitmapLen(finalNumOfRows)) { - char* btmp = taosMemoryRealloc(pColumnInfoData->nullbitmap, BitmapLen(finalNumOfRows)); + char* btmp = taosMemoryRealloc(pColumnInfoData->nullbitmap, BitmapLen(finalNumOfRows)); if (btmp == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -622,7 +623,7 @@ int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock) { if (pCol->reassigned && IS_VAR_DATA_TYPE(pCol->info.type)) { for (int32_t row = 0; row < numOfRows; ++row) { - char* pColData = pCol->pData + pCol->varmeta.offset[row]; + char* pColData = pCol->pData + pCol->varmeta.offset[row]; int32_t colSize = 0; if (pCol->info.type == TSDB_DATA_TYPE_JSON) { colSize = getJsonValueLen(pColData); @@ 
-698,8 +699,7 @@ int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) { return TSDB_CODE_SUCCESS; } -static bool colDataIsNNull(const SColumnInfoData* pColumnInfoData, int32_t startIndex, - uint32_t nRows) { +static bool colDataIsNNull(const SColumnInfoData* pColumnInfoData, int32_t startIndex, uint32_t nRows) { if (!pColumnInfoData->hasNull) { return false; } @@ -880,7 +880,6 @@ int32_t dataBlockCompar(const void* p1, const void* p2, const void* param) { } static int32_t blockDataAssign(SColumnInfoData* pCols, const SSDataBlock* pDataBlock, const int32_t* index) { - size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pDst = &pCols[i]; @@ -1131,6 +1130,7 @@ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo* if (tmp == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } + // memset(tmp, 0, numOfRows * pColumn->info.bytes); // copy back the existed data if (pColumn->pData != NULL) { @@ -1474,8 +1474,8 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize, int int end = nRows; while (start <= end) { int mid = start + (end - start) / 2; - //data size + var data type columns offset + fixed data type columns bitmap len - int midSize = rowSize * mid + numVarCols * sizeof(int32_t) * mid + numFixCols * BitmapLen(mid); + // data size + var data type columns offset + fixed data type columns bitmap len + int midSize = rowSize * mid + numVarCols * sizeof(int32_t) * mid + numFixCols * BitmapLen(mid); if (midSize > payloadSize) { result = mid; end = mid - 1; @@ -1669,7 +1669,7 @@ int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock) { if (pColData->reassigned && IS_VAR_DATA_TYPE(pColData->info.type)) { for (int32_t row = 0; row < rows; ++row) { - char* pData = pColData->pData + pColData->varmeta.offset[row]; + char* pData = pColData->pData + pColData->varmeta.offset[row]; int32_t colSize = 0; if (pColData->info.type == TSDB_DATA_TYPE_JSON) { colSize = getJsonValueLen(pData); @@ -1772,7 +1772,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { // for debug char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) { - int32_t size = 2048*1024; + int32_t size = 2048 * 1024; *pDataBuf = taosMemoryCalloc(size, 1); char* dumpBuf = *pDataBuf; char pBuf[128] = {0}; @@ -1780,8 +1780,8 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) int32_t rows = pDataBlock->info.rows; int32_t len = 0; len += snprintf(dumpBuf + len, size - len, - "===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%" PRId64 - "|rows:%" PRId64 "|version:%" PRIu64 "|cal start:%" PRIu64 "|cal end:%" PRIu64 "|tbl:%s\n", + "===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%" PRId64 "|rows:%" PRId64 + "|version:%" PRIu64 "|cal start:%" PRIu64 "|cal end:%" PRIu64 "|tbl:%s\n", flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.id.groupId, pDataBlock->info.id.uid, pDataBlock->info.rows, pDataBlock->info.version, pDataBlock->info.calWin.skey, pDataBlock->info.calWin.ekey, pDataBlock->info.parTbName); @@ -2157,21 +2157,21 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { data += metaSize; dataLen += metaSize; - if (pColRes->reassigned && IS_VAR_DATA_TYPE(pColRes->info.type)) { - colSizes[col] = 0; - for (int32_t row = 0; row < numOfRows; ++row) { - char* pColData = pColRes->pData + pColRes->varmeta.offset[row]; - int32_t colSize = 0; - if 
(pColRes->info.type == TSDB_DATA_TYPE_JSON) { - colSize = getJsonValueLen(pColData); - } else { - colSize = varDataTLen(pColData); - } - colSizes[col] += colSize; - dataLen += colSize; - memmove(data, pColData, colSize); - data += colSize; + if (pColRes->reassigned && IS_VAR_DATA_TYPE(pColRes->info.type)) { + colSizes[col] = 0; + for (int32_t row = 0; row < numOfRows; ++row) { + char* pColData = pColRes->pData + pColRes->varmeta.offset[row]; + int32_t colSize = 0; + if (pColRes->info.type == TSDB_DATA_TYPE_JSON) { + colSize = getJsonValueLen(pColData); + } else { + colSize = varDataTLen(pColData); } + colSizes[col] += colSize; + dataLen += colSize; + memmove(data, pColData, colSize); + data += colSize; + } } else { colSizes[col] = colDataGetLength(pColRes, numOfRows); dataLen += colSizes[col]; @@ -2182,7 +2182,8 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { } colSizes[col] = htonl(colSizes[col]); -// uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type, htonl(colSizes[col]), colSizes[col]); + // uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type, + // htonl(colSizes[col]), colSizes[col]); } *actualLen = dataLen; @@ -2284,7 +2285,7 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) { } void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList) { -// int32_t totalRows = pBlock->info.rows; + // int32_t totalRows = pBlock->info.rows; int32_t bmLen = BitmapLen(totalRows); char* pBitmap = NULL; int32_t maxRows = 0; @@ -2311,8 +2312,9 @@ void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList if (colDataIsNull_var(pDst, j)) { colDataSetNull_var(pDst, numOfRows); } else { - // fix address sanitizer error. p1 may point to memory that will change during realloc of colDataSetVal, first copy it to p2 - char* p1 = colDataGetVarData(pDst, j); + // fix address sanitizer error. 
p1 may point to memory that will change during realloc of colDataSetVal, first + // copy it to p2 + char* p1 = colDataGetVarData(pDst, j); int32_t len = 0; if (pDst->info.type == TSDB_DATA_TYPE_JSON) { len = getJsonValueLen(p1); diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index a772efc33c..e080c2d2ec 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -216,7 +216,11 @@ uint32_t tsCurRange = 100; // range char tsCompressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR // udf -bool tsStartUdfd = true; +#ifdef WINDOWS +bool tsStartUdfd = false; +#else +bool tsStartUdfd = true; +#endif // wal int64_t tsWalFsyncDataSizeLimit = (100 * 1024 * 1024L); @@ -236,6 +240,15 @@ int64_t tsStreamBufferSize = 128 * 1024 * 1024; int64_t tsCheckpointInterval = 3 * 60 * 60 * 1000; bool tsFilterScalarMode = false; int32_t tsKeepTimeOffset = 0; // latency of data migration +int tsResolveFQDNRetryTime = 100; //seconds + +char tsS3Endpoint[TSDB_FQDN_LEN] = ""; +char tsS3AccessKey[TSDB_FQDN_LEN] = ""; +char tsS3AccessKeyId[TSDB_FQDN_LEN] = ""; +char tsS3AccessKeySecret[TSDB_FQDN_LEN] = ""; +char tsS3BucketName[TSDB_FQDN_LEN] = ""; +char tsS3AppId[TSDB_FQDN_LEN] = ""; +int8_t tsS3Enabled = false; #ifndef _STORAGE int32_t taosSetTfsCfg(SConfig *pCfg) { @@ -258,7 +271,43 @@ int32_t taosSetTfsCfg(SConfig *pCfg) { int32_t taosSetTfsCfg(SConfig *pCfg); #endif -struct SConfig *taosGetCfg() { return tsCfg; } +int32_t taosSetS3Cfg(SConfig *pCfg) { + tstrncpy(tsS3AccessKey, cfgGetItem(pCfg, "s3Accesskey")->str, TSDB_FQDN_LEN); + if (tsS3AccessKey[0] == '<') { + return 0; + } + char *colon = strchr(tsS3AccessKey, ':'); + if (!colon) { + uError("invalid access key:%s", tsS3AccessKey); + return -1; + } + *colon = '\0'; + tstrncpy(tsS3AccessKeyId, tsS3AccessKey, TSDB_FQDN_LEN); + tstrncpy(tsS3AccessKeySecret, colon + 1, TSDB_FQDN_LEN); + tstrncpy(tsS3Endpoint, cfgGetItem(pCfg, "s3Endpoint")->str, TSDB_FQDN_LEN); + tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); + char *cos = strstr(tsS3Endpoint, "cos."); + if (cos) { + char *appid = strrchr(tsS3BucketName, '-'); + if (!appid) { + uError("failed to locate appid in bucket:%s", tsS3BucketName); + return -1; + } else { + tstrncpy(tsS3AppId, appid + 1, TSDB_FQDN_LEN); + } + } + if (tsS3BucketName[0] != '<' && tsDiskCfgNum > 1) { +#ifdef USE_COS + tsS3Enabled = true; +#endif + } + + return 0; +} + +struct SConfig *taosGetCfg() { + return tsCfg; +} static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile, char *apolloUrl) { @@ -580,6 +629,11 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "keepTimeOffset", tsKeepTimeOffset, 0, 23, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "resolveFQDNRetryTime", tsResolveFQDNRetryTime, 1, 10240, 0) != 0) return -1; + + if (cfgAddString(pCfg, "s3Accesskey", tsS3AccessKey, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddString(pCfg, "s3Endpoint", tsS3Endpoint, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER) != 0) return -1; GRANT_CFG_ADD; return 0; @@ -979,6 +1033,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsKeepTimeOffset = cfgGetItem(pCfg, "keepTimeOffset")->i32; 
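  // note: every option registered via cfgAddInt32()/cfgAddString() in taosAddServerCfg() must be
  // read back here with cfgGetItem(), as the new resolveFQDNRetryTime getter below does; the
  // s3Accesskey/s3Endpoint/s3BucketName strings added above are consumed separately by
  // taosSetS3Cfg() at the end of taosInitCfg().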
tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32; tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32; + tsResolveFQDNRetryTime = cfgGetItem(pCfg, "resolveFQDNRetryTime")->i32; GRANT_CFG_GET; return 0; @@ -1502,6 +1557,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile if (taosSetServerCfg(tsCfg)) return -1; if (taosSetReleaseCfg(tsCfg)) return -1; if (taosSetTfsCfg(tsCfg) != 0) return -1; + if (taosSetS3Cfg(tsCfg) != 0) return -1; } taosSetSystemCfg(tsCfg); diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c index cb0849f4b9..64e18ef06d 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c @@ -46,7 +46,7 @@ static int32_t mmDecodeOption(SJson *pJson, SMnodeOpt *pOption) { if (code < 0) return -1; tjsonGetInt32ValueFromDouble(replica, "role", pOption->nodeRoles[i], code); if (code < 0) return -1; - if(pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER){ + if (pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER) { pOption->numOfReplicas++; } } @@ -65,7 +65,7 @@ int32_t mmReadFile(const char *path, SMnodeOpt *pOption) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%smnode.json", path, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("mnode file:%s not exist", file); return 0; } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index da7f4d4a56..ed32e75d18 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -97,7 +97,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t SWrapperCfg *pCfgs = NULL; snprintf(file, sizeof(file), "%s%svnodes.json", pMgmt->path, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("vnode file:%s not exist", file); return 0; } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index bed9a67303..cf57deaa22 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -742,7 +742,6 @@ SArray *vmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_STREAM_SCAN_HISTORY_FINISH, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_STREAM_SCAN_HISTORY_FINISH_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_STREAM_TRANSFER_STATE, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECK, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECK_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c index 1564a09035..88f6b5da40 100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -100,7 +100,7 @@ int32_t dmReadEps(SDnodeData *pData) { goto _OVER; } - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("dnode file:%s not exist", file); code = 0; goto _OVER; @@ -350,7 +350,7 @@ void dmRotateMnodeEpSet(SDnodeData *pData) { } void 
dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet) { - if(!pData->validMnodeEps) return; + if (!pData->validMnodeEps) return; dmGetMnodeEpSet(pData, pEpSet); dTrace("msg is redirected, handle:%p num:%d use:%d", pMsg->info.handle, pEpSet->numOfEps, pEpSet->inUse); for (int32_t i = 0; i < pEpSet->numOfEps; ++i) { @@ -469,7 +469,7 @@ static int32_t dmReadDnodePairs(SDnodeData *pData) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%sdnode%sep.json", tsDataDir, TD_DIRSEP, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dDebug("dnode file:%s not exist", file); code = 0; goto _OVER; diff --git a/source/dnode/mgmt/node_util/src/dmFile.c b/source/dnode/mgmt/node_util/src/dmFile.c index fb05f08c0c..c81efddcc1 100644 --- a/source/dnode/mgmt/node_util/src/dmFile.c +++ b/source/dnode/mgmt/node_util/src/dmFile.c @@ -38,7 +38,7 @@ int32_t dmReadFile(const char *path, const char *name, bool *pDeployed) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%s%s.json", path, TD_DIRSEP, name); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("file:%s not exist", file); code = 0; goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 1bd629e56f..4f7e80c0a3 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -666,7 +666,12 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { terrno = TSDB_CODE_INVALID_MSG; goto _OVER; } - +#ifdef WINDOWS + if (taosArrayGetSize(createReq.pRetensions) > 0) { + terrno = TSDB_CODE_MND_INVALID_PLATFORM; + goto _OVER; + } +#endif mInfo("db:%s, start to create, vgroups:%d", createReq.db, createReq.numOfVgroups); if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_DB, NULL) != 0) { goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c index a1e5bf895a..333fdf83bb 100644 --- a/source/dnode/mnode/impl/src/mndFunc.c +++ b/source/dnode/mnode/impl/src/mndFunc.c @@ -359,7 +359,10 @@ static int32_t mndProcessCreateFuncReq(SRpcMsg *pReq) { terrno = TSDB_CODE_INVALID_MSG; goto _OVER; } - +#ifdef WINDOWS + terrno = TSDB_CODE_MND_INVALID_PLATFORM; + goto _OVER; +#endif mInfo("func:%s, start to create, size:%d", createReq.name, createReq.codeLen); if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_FUNC) != 0) { goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndIndex.c b/source/dnode/mnode/impl/src/mndIndex.c index 2157804559..2e78116a86 100644 --- a/source/dnode/mnode/impl/src/mndIndex.c +++ b/source/dnode/mnode/impl/src/mndIndex.c @@ -79,9 +79,12 @@ int32_t mndInitIdx(SMnode *pMnode) { return sdbSetTable(pMnode->pSdb, table); } -static int32_t mndFindSuperTableTagId(const SStbObj *pStb, const char *tagName) { +static int32_t mndFindSuperTableTagId(const SStbObj *pStb, const char *tagName, int8_t *hasIdx) { for (int32_t tag = 0; tag < pStb->numOfTags; tag++) { if (strcasecmp(pStb->pTags[tag].name, tagName) == 0) { + if (IS_IDX_ON(&pStb->pTags[tag])) { + *hasIdx = 1; + } return tag; } } @@ -597,7 +600,8 @@ static int32_t mndSetUpdateIdxStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb pNew->updateTime = taosGetTimestampMs(); pNew->lock = 0; - int32_t tag = mndFindSuperTableTagId(pOld, tagName); + int8_t hasIdx = 0; + int32_t tag = mndFindSuperTableTagId(pOld, tagName, &hasIdx); if (tag < 0) { terrno = TSDB_CODE_MND_TAG_NOT_EXIST; return -1; @@ -612,14 
+616,14 @@ static int32_t mndSetUpdateIdxStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb SSchema *pTag = pNew->pTags + tag; if (on == 1) { - if (IS_IDX_ON(pTag)) { + if (hasIdx && tag != 0) { terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; return -1; } else { SSCHMEA_SET_IDX_ON(pTag); } } else { - if (!IS_IDX_ON(pTag)) { + if (hasIdx == 0) { terrno = TSDB_CODE_MND_SMA_NOT_EXIST; } else { SSCHMEA_SET_IDX_OFF(pTag); @@ -667,7 +671,42 @@ _OVER: mndTransDrop(pTrans); return code; } +int8_t mndCheckIndexNameByTagName(SMnode *pMnode, SIdxObj *pIdxObj) { + // build index on first tag, and no index name; + int8_t exist = 0; + SDbObj *pDb = NULL; + if (strlen(pIdxObj->db) > 0) { + pDb = mndAcquireDb(pMnode, pIdxObj->db); + if (pDb == NULL) return 0; + } + SSmaAndTagIter *pIter = NULL; + SIdxObj *pIdx = NULL; + SSdb *pSdb = pMnode->pSdb; + while (1) { + pIter = sdbFetch(pSdb, SDB_IDX, pIter, (void **)&pIdx); + if (pIter == NULL) break; + + if (NULL != pDb && pIdx->dbUid != pDb->uid) { + sdbRelease(pSdb, pIdx); + continue; + } + if (pIdxObj->stbUid != pIdx->stbUid) { + sdbRelease(pSdb, pIdx); + continue; + } + if (strncmp(pIdxObj->colName, pIdx->colName, TSDB_COL_NAME_LEN) == 0) { + sdbCancelFetch(pSdb, pIter); + sdbRelease(pSdb, pIdx); + exist = 1; + break; + } + sdbRelease(pSdb, pIdx); + } + + mndReleaseDb(pMnode, pDb); + return exist; +} static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *req, SDbObj *pDb, SStbObj *pStb) { int32_t code = -1; SIdxObj idxObj = {0}; @@ -681,11 +720,20 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *re idxObj.stbUid = pStb->uid; idxObj.dbUid = pStb->dbUid; - int32_t tag = mndFindSuperTableTagId(pStb, req->colName); + int8_t hasIdx = 0; + int32_t tag = mndFindSuperTableTagId(pStb, req->colName, &hasIdx); if (tag < 0) { terrno = TSDB_CODE_MND_TAG_NOT_EXIST; return -1; - } else if (tag == 0) { + } + int8_t exist = 0; + if (tag == 0 && hasIdx == 1) { + exist = mndCheckIndexNameByTagName(pMnode, &idxObj); + if (exist) { + terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; + return -1; + } + } else if (hasIdx == 1) { terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; return -1; } @@ -695,11 +743,11 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *re return -1; } - SSchema *pTag = pStb->pTags + tag; - if (IS_IDX_ON(pTag)) { - terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; - return -1; - } + // SSchema *pTag = pStb->pTags + tag; + // if (IS_IDX_ON(pTag)) { + // terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; + // return -1; + // } code = mndAddIndexImpl(pMnode, pReq, pDb, pStb, &idxObj); return code; @@ -806,8 +854,8 @@ int32_t mndDropIdxsByStb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *p if (pIdx->stbUid == pStb->uid) { if (mndSetDropIdxCommitLogs(pMnode, pTrans, pIdx) != 0) { + sdbCancelFetch(pSdb, pIter); sdbRelease(pSdb, pIdx); - sdbCancelFetch(pSdb, pIdx); return -1; } } diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index 44f4751700..7d842be7de 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -58,8 +58,10 @@ static int32_t convertToRetrieveType(char *name, int32_t len) { type = TSDB_MGMT_TABLE_DNODE; } else if (strncasecmp(name, TSDB_INS_TABLE_MNODES, len) == 0) { type = TSDB_MGMT_TABLE_MNODE; +/* } else if (strncasecmp(name, TSDB_INS_TABLE_MODULES, len) == 0) { type = TSDB_MGMT_TABLE_MODULE; +*/ } else if (strncasecmp(name, TSDB_INS_TABLE_QNODES, len) == 0) { type = 
TSDB_MGMT_TABLE_QNODE; } else if (strncasecmp(name, TSDB_INS_TABLE_SNODES, len) == 0) { diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index b84297f6bf..e186a8742f 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -705,7 +705,10 @@ static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq) { terrno = TSDB_CODE_INVALID_MSG; goto _OVER; } - +#ifdef WINDOWS + terrno = TSDB_CODE_MND_INVALID_PLATFORM; + goto _OVER; +#endif mInfo("sma:%s, start to create", createReq.name); if (mndCheckCreateSmaReq(&createReq) != 0) { goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index df4d4a3be7..d7d374c207 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -18,6 +18,7 @@ #include "mndDb.h" #include "mndDnode.h" #include "mndIndex.h" +#include "mndIndexComm.h" #include "mndInfoSchema.h" #include "mndMnode.h" #include "mndPerfSchema.h" @@ -822,7 +823,7 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat return -1; } - if(pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags){ + if (pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags) { terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW; return -1; } @@ -857,11 +858,39 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea SStbObj stbObj = {0}; int32_t code = -1; + char fullIdxName[TSDB_INDEX_FNAME_LEN * 2] = {0}; + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq, "create-stb"); if (pTrans == NULL) goto _OVER; mInfo("trans:%d, used to create stb:%s", pTrans->id, pCreate->name); if (mndBuildStbFromReq(pMnode, &stbObj, pCreate, pDb) != 0) goto _OVER; + + char randStr[24] = {0}; + taosRandStr2(randStr, tListLen(randStr) - 1); + SSchema *pSchema = &(stbObj.pTags[0]); + sprintf(fullIdxName, "%s.%s_%s", pDb->name, pSchema->name, randStr); + + SSIdx idx = {0}; + if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0 && idx.pIdx != NULL) { + terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; + mndReleaseIdx(pMnode, idx.pIdx); + + goto _OVER; + } + + SIdxObj idxObj = {0}; + memcpy(idxObj.name, fullIdxName, TSDB_INDEX_FNAME_LEN); + memcpy(idxObj.stb, stbObj.name, TSDB_TABLE_FNAME_LEN); + memcpy(idxObj.db, stbObj.db, TSDB_DB_FNAME_LEN); + memcpy(idxObj.colName, pSchema->name, TSDB_COL_NAME_LEN); + idxObj.createdTime = taosGetTimestampMs(); + idxObj.uid = mndGenerateUid(fullIdxName, strlen(fullIdxName)); + idxObj.stbUid = stbObj.uid; + idxObj.dbUid = stbObj.dbUid; + + if (mndSetCreateIdxCommitLogs(pMnode, pTrans, &idxObj) < 0) goto _OVER; + if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) goto _OVER; if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; code = 0; @@ -956,7 +985,7 @@ static int32_t mndBuildStbFromAlter(SStbObj *pStb, SStbObj *pDst, SMCreateStbReq return -1; } - if(pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags){ + if (pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags) { terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW; return -1; } @@ -1188,7 +1217,7 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p return -1; } - if(pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ntags){ + if (pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ntags) { terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW; return -1; } @@ 
-1507,7 +1536,7 @@ static int32_t mndAddSuperTableColumn(const SStbObj *pOld, SStbObj *pNew, SArray return -1; } - if(pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ncols){ + if (pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ncols) { terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW; return -1; } @@ -3184,7 +3213,6 @@ static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB SSdb *pSdb = pMnode->pSdb; SStbObj *pStb = NULL; - int32_t numOfRows = 0; if (!pShow->sysDbRsp) { numOfRows = buildSysDbColsInfo(pBlock, pShow->db, pShow->filterTb); @@ -3208,7 +3236,7 @@ static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB if (pShow->pIter == NULL) break; } else { fetch = true; - void *pKey = taosHashGetKey(pShow->pIter, NULL); + void *pKey = taosHashGetKey(pShow->pIter, NULL); pStb = sdbAcquire(pSdb, SDB_STB, pKey); if (!pStb) continue; } diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index a0d53ec780..427a52af3b 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -692,7 +692,10 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { terrno = TSDB_CODE_INVALID_MSG; goto _OVER; } - +#ifdef WINDOWS + terrno = TSDB_CODE_MND_INVALID_PLATFORM; + goto _OVER; +#endif mInfo("stream:%s, start to create, sql:%s", createStreamReq.name, createStreamReq.sql); if (mndCheckCreateStreamReq(&createStreamReq) != 0) { diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 53f22f6e60..d62b9e4174 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -489,7 +489,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR SMqVgEp *pVgEp = taosArrayGetP(pConsumerEpNew->vgs, i); if(pVgEp->vgId == d1->vgId){ jump = true; - mInfo("pSub->offsetRows jump, because consumer id:%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId); + mInfo("pSub->offsetRows jump, because consumer id:0x%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId); break; } } diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index 4000e72835..635fdcf459 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -77,6 +77,8 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) { pTask->chkInfo.version = ver; pTask->pMeta = pSnode->pMeta; + streamTaskOpenAllUpstreamInput(pTask); + pTask->pState = streamStateOpen(pSnode->path, pTask, false, -1, -1); if (pTask->pState == NULL) { return -1; diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 194ffa16f6..c70df86e20 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -8,6 +8,7 @@ set( "src/vnd/vnodeCommit.c" "src/vnd/vnodeQuery.c" "src/vnd/vnodeModule.c" + "src/vnd/vnodeCos.c" "src/vnd/vnodeSvr.c" "src/vnd/vnodeSync.c" "src/vnd/vnodeSnapshot.c" @@ -155,6 +156,45 @@ target_link_libraries( PUBLIC index ) +if(${TD_LINUX}) +set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) +target_link_libraries( + vnode + + # s3 + PUBLIC cos_c_sdk_static + PUBLIC ${APR_UTIL_LIBRARY} + PUBLIC ${APR_LIBRARY} + PUBLIC ${MINIXML_LIBRARY} + PUBLIC ${CURL_LIBRARY} +) + +# s3 
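+# apr-config (named apr-1-config on some distributions) reports the APR header
+# directory; its output feeds the include_directories() call below so the
+# cos-c-sdk sources can locate the apr headers.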
+FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/)
+IF (APR_CONFIG_BIN)
+  EXECUTE_PROCESS(
+    COMMAND ${APR_CONFIG_BIN} --includedir
+    OUTPUT_VARIABLE APR_INCLUDE_DIR
+    OUTPUT_STRIP_TRAILING_WHITESPACE
+  )
+ENDIF()
+include_directories (${APR_INCLUDE_DIR})
+target_include_directories(
+  vnode
+  PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk"
+  PUBLIC "$ENV{HOME}/.cos-local.1/include"
+  )
+
+if(${BUILD_WITH_COS})
+  add_definitions(-DUSE_COS)
+endif(${BUILD_WITH_COS})
+
+endif(${TD_LINUX})
+
 IF (TD_GRANT)
   TARGET_LINK_LIBRARIES(vnode PUBLIC grant)
 ENDIF ()
@@ -169,8 +209,6 @@
 if(${BUILD_WITH_ROCKSDB})
   add_definitions(-DUSE_ROCKSDB)
 endif(${BUILD_WITH_ROCKSDB})
-
-
 if(${BUILD_TEST})
   add_subdirectory(test)
 endif(${BUILD_TEST})
diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h
new file mode 100644
index 0000000000..cf2c5eb441
--- /dev/null
+++ b/source/dnode/vnode/src/inc/vndCos.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TD_VND_COS_H_
+#define _TD_VND_COS_H_
+
+#include "vnd.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int8_t tsS3Enabled;
+
+int32_t s3Init();
+void s3CleanUp();
+int32_t s3PutObjectFromFile(const char *file, const char *object);
+void s3DeleteObjects(const char *object_name[], int nobject);
+bool s3Exists(const char *object_name);
+bool s3Get(const char *object_name, const char *path);
+void s3EvictCache(const char *path, long object_size);
+long s3Size(const char *object_name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*_TD_VND_COS_H_*/
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index cd7704940b..d37cf833c2 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -167,7 +167,7 @@ int metaAddIndexToSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
 int metaDropIndexFromSTable(SMeta* pMeta, int64_t version, SDropIndexReq* pReq);
 int64_t metaGetTimeSeriesNum(SMeta* pMeta);
-SMCtbCursor* metaOpenCtbCursor(SMeta* pMeta, tb_uid_t uid, int lock);
+SMCtbCursor* metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock);
 void metaCloseCtbCursor(SMCtbCursor* pCtbCur, int lock);
 tb_uid_t metaCtbCursorNext(SMCtbCursor* pCtbCur);
 SMStbCursor* metaOpenStbCursor(SMeta* pMeta, tb_uid_t uid);
@@ -250,7 +250,6 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg);
 int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg);
 int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg);
 int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg);
-int32_t tqProcessTaskTransferStateReq(STQ* pTq, SRpcMsg* pMsg);
 int32_t tqProcessTaskScanHistoryFinishReq(STQ* pTq, SRpcMsg* pMsg);
 int32_t tqProcessTaskScanHistoryFinishRsp(STQ* pTq, SRpcMsg* pMsg);
 int32_t tqCheckLogInWal(STQ* pTq, int64_t version);
diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c
index 41fc3b3d7e..d5e6a23d5c
100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -17,8 +17,8 @@ #include "osMemory.h" #include "tencode.h" -void _metaReaderInit(SMetaReader* pReader, void* pVnode, int32_t flags, SStoreMeta* pAPI) { - SMeta* pMeta = ((SVnode*)pVnode)->pMeta; +void _metaReaderInit(SMetaReader *pReader, void *pVnode, int32_t flags, SStoreMeta *pAPI) { + SMeta *pMeta = ((SVnode *)pVnode)->pMeta; metaReaderDoInit(pReader, pMeta, flags); pReader->pAPI = pAPI; } @@ -143,7 +143,7 @@ tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) { int metaGetTableNameByUid(void *pVnode, uint64_t uid, char *tbName) { int code = 0; SMetaReader mr = {0}; - metaReaderDoInit(&mr, ((SVnode*)pVnode)->pMeta, 0); + metaReaderDoInit(&mr, ((SVnode *)pVnode)->pMeta, 0); code = metaReaderGetTableEntryByUid(&mr, uid); if (code < 0) { metaReaderClear(&mr); @@ -195,7 +195,7 @@ int metaGetTableUidByName(void *pVnode, char *tbName, uint64_t *uid) { int metaGetTableTypeByName(void *pVnode, char *tbName, ETableType *tbType) { int code = 0; SMetaReader mr = {0}; - metaReaderDoInit(&mr, ((SVnode*)pVnode)->pMeta, 0); + metaReaderDoInit(&mr, ((SVnode *)pVnode)->pMeta, 0); code = metaGetTableEntryByName(&mr, tbName); if (code == 0) *tbType = mr.me.type; @@ -244,7 +244,7 @@ SMTbCursor *metaOpenTbCursor(void *pVnode) { return NULL; } - SVnode* pVnodeObj = pVnode; + SVnode *pVnodeObj = pVnode; // tdbTbcMoveToFirst((TBC *)pTbCur->pDbc); pTbCur->pMeta = pVnodeObj->pMeta; pTbCur->paused = 1; @@ -408,17 +408,9 @@ _err: return NULL; } -struct SMCtbCursor { - SMeta *pMeta; - TBC *pCur; - tb_uid_t suid; - void *pKey; - void *pVal; - int kLen; - int vLen; -}; -SMCtbCursor *metaOpenCtbCursor(SMeta *pMeta, tb_uid_t uid, int lock) { +SMCtbCursor *metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock) { + SMeta* pMeta = ((SVnode*)pVnode)->pMeta; SMCtbCursor *pCtbCur = NULL; SCtbIdxKey ctbIdxKey; int ret = 0; @@ -435,7 +427,7 @@ SMCtbCursor *metaOpenCtbCursor(SMeta *pMeta, tb_uid_t uid, int lock) { metaRLock(pMeta); } - ret = tdbTbcOpen(pMeta->pCtbIdx, &pCtbCur->pCur, NULL); + ret = tdbTbcOpen(pMeta->pCtbIdx, (TBC**)&pCtbCur->pCur, NULL); if (ret < 0) { metaULock(pMeta); taosMemoryFree(pCtbCur); @@ -1139,7 +1131,7 @@ int32_t metaFilterTtl(void *pVnode, SMetaFltParam *arg, SArray *pUids) { pCursor->type = param->type; metaRLock(pMeta); - //ret = tdbTbcOpen(pMeta->pTtlIdx, &pCursor->pCur, NULL); + // ret = tdbTbcOpen(pMeta->pTtlIdx, &pCursor->pCur, NULL); END: if (pCursor->pMeta) metaULock(pCursor->pMeta); @@ -1194,7 +1186,7 @@ int32_t metaFilterTableIds(void *pVnode, SMetaFltParam *arg, SArray *pUids) { ret = -1; for (int i = 0; i < oStbEntry.stbEntry.schemaTag.nCols; i++) { SSchema *schema = oStbEntry.stbEntry.schemaTag.pSchema + i; - if (schema->colId == param->cid && param->type == schema->type && (IS_IDX_ON(schema) || i == 0)) { + if (schema->colId == param->cid && param->type == schema->type && (IS_IDX_ON(schema))) { ret = 0; } } @@ -1373,7 +1365,7 @@ int32_t metaGetTableTagsByUids(void *pVnode, int64_t suid, SArray *uidList) { } int32_t metaGetTableTags(void *pVnode, uint64_t suid, SArray *pUidTagInfo) { - SMCtbCursor *pCur = metaOpenCtbCursor(((SVnode *)pVnode)->pMeta, suid, 1); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, suid, 1); // If len > 0 means there already have uids, and we only want the // tags of the specified tables, of which uid in the uid list. 
Otherwise, all table tags are retrieved and kept diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 632e6dd872..f56837f759 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -450,12 +450,13 @@ int metaAddIndexToSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { goto _err; } if (IS_IDX_ON(pNew) && !IS_IDX_ON(pOld)) { - if (diffIdx != -1) goto _err; + // if (diffIdx != -1) goto _err; diffIdx = i; + break; } } - if (diffIdx == -1 || diffIdx == 0) { + if (diffIdx == -1) { goto _err; } @@ -586,7 +587,7 @@ int metaDropIndexFromSTable(SMeta *pMeta, int64_t version, SDropIndexReq *pReq) for (int i = 0; i < oStbEntry.stbEntry.schemaTag.nCols; i++) { SSchema *schema = oStbEntry.stbEntry.schemaTag.pSchema + i; if (0 == strncmp(schema->name, pReq->colName, sizeof(pReq->colName))) { - if (i != 0 || IS_IDX_ON(schema)) { + if (IS_IDX_ON(schema)) { pCol = schema; } break; @@ -2094,7 +2095,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { } else { for (int i = 0; i < pTagSchema->nCols; i++) { pTagColumn = &pTagSchema->pSchema[i]; - if (i != 0 && !IS_IDX_ON(pTagColumn)) continue; + if (!IS_IDX_ON(pTagColumn)) continue; STagVal tagVal = {.cid = pTagColumn->colId}; tTagGet((const STag *)pCtbEntry->ctbEntry.pTags, &tagVal); diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 1e7de3c526..8da2fff5a6 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -905,6 +905,7 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, int64_t version, void *pReq, void *pMsg, tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); if (tdExecuteRSmaAsync(pSma, version, pMsg, len, inputType, *pTbSuid) < 0) { smaError("vgId:%d, failed to process rsma submit exec 2 since: %s", SMA_VID(pSma), terrstr()); + taosHashCancelIterate(uidStore.uidHash, pIter); goto _err; } } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index de34a96836..a5f7e0eb68 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -930,6 +930,8 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) { pTask->pMsgCb = &pTq->pVnode->msgCb; pTask->pMeta = pTq->pStreamMeta; + streamTaskOpenAllUpstreamInput(pTask); + // backup the initial status, and set it to be TASK_STATUS__INIT pTask->chkInfo.version = ver; pTask->chkInfo.currentVer = ver; @@ -1274,7 +1276,9 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { if (done) { pTask->tsInfo.step2Start = taosGetTimestampMs(); - streamTaskEndScanWAL(pTask); + qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, 0.0); + appendTranstateIntoInputQ(pTask); + streamTryExec(pTask); // exec directly } else { STimeWindow* pWindow = &pTask->dataRange.window; tqDebug("s-task:%s level:%d verRange:%" PRId64 " - %" PRId64 " window:%" PRId64 "-%" PRId64 @@ -1339,44 +1343,6 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { return 0; } -// notify the downstream tasks to transfer executor state after handle all history blocks. 
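  // (handler removed) transfer-state is no longer driven by a dedicated RPC: once the WAL scan
  // finishes, the task appends a trans-state block to its own input queue via
  // appendTranstateIntoInputQ() and is executed directly, as in the tqProcessTaskScanHistory()
  // and tqRestore.c changes above.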
-int32_t tqProcessTaskTransferStateReq(STQ* pTq, SRpcMsg* pMsg) { - char* pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); - int32_t len = pMsg->contLen - sizeof(SMsgHead); - - SStreamTransferReq req = {0}; - - SDecoder decoder; - tDecoderInit(&decoder, (uint8_t*)pReq, len); - int32_t code = tDecodeStreamScanHistoryFinishReq(&decoder, &req); - tDecoderClear(&decoder); - - tqDebug("vgId:%d start to process transfer state msg, from s-task:0x%x", pTq->pStreamMeta->vgId, req.downstreamTaskId); - - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.downstreamTaskId); - if (pTask == NULL) { - tqError("failed to find task:0x%x, it may have been dropped already. process transfer state failed", req.downstreamTaskId); - return -1; - } - - int32_t remain = streamAlignTransferState(pTask); - if (remain > 0) { - tqDebug("s-task:%s receive upstream transfer state msg, remain:%d", pTask->id.idStr, remain); - streamMetaReleaseTask(pTq->pStreamMeta, pTask); - return 0; - } - - // transfer the ownership of executor state - tqDebug("s-task:%s all upstream tasks send transfer msg, open transfer state flag", pTask->id.idStr); - ASSERT(pTask->streamTaskId.taskId != 0 && pTask->info.fillHistory == 1); - - pTask->status.transferState = true; - - streamSchedExec(pTask); - streamMetaReleaseTask(pTq->pStreamMeta, pTask); - return 0; -} - int32_t tqProcessTaskScanHistoryFinishReq(STQ* pTq, SRpcMsg* pMsg) { char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); @@ -1564,7 +1530,7 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) { if (pTask) { streamProcessDispatchRsp(pTask, pRsp, pMsg->code); streamMetaReleaseTask(pTq->pStreamMeta, pTask); - return 0; + return TSDB_CODE_SUCCESS; } else { tqDebug("vgId:%d failed to handle the dispatch rsp, since find task:0x%x failed", vgId, taskId); return TSDB_CODE_INVALID_MSG; @@ -1708,6 +1674,8 @@ int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg) { int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) { STQ* pTq = pVnode->pTq; + int32_t vgId = pVnode->config.vgId; + SMsgHead* msgStr = pMsg->pCont; char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead)); int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); @@ -1724,7 +1692,9 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) { tDecoderClear(&decoder); int32_t taskId = req.taskId; - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.taskId); + tqDebug("vgId:%d receive dispatch msg to s-task:0x%"PRIx64"-0x%x", vgId, req.streamId, taskId); + + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, taskId); if (pTask != NULL) { SRpcMsg rsp = {.info = pMsg->info, .code = 0}; streamProcessDispatchMsg(pTask, &req, &rsp, false); @@ -1741,7 +1711,7 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) { FAIL: if (pMsg->info.handle == NULL) { - tqError("s-task:0x%x vgId:%d msg handle is null, abort enqueue dispatch msg", pTq->pStreamMeta->vgId, taskId); + tqError("s-task:0x%x vgId:%d msg handle is null, abort enqueue dispatch msg", vgId, taskId); return -1; } diff --git a/source/dnode/vnode/src/tq/tqOffsetSnapshot.c b/source/dnode/vnode/src/tq/tqOffsetSnapshot.c index a4428aed43..6a66da30c6 100644 --- a/source/dnode/vnode/src/tq/tqOffsetSnapshot.c +++ b/source/dnode/vnode/src/tq/tqOffsetSnapshot.c @@ -60,7 +60,7 @@ int32_t tqOffsetSnapRead(STqOffsetReader* pReader, uint8_t** ppData) { } int64_t sz = 0; - if (taosStatFile(fname, &sz, NULL) < 0) { + if 
(taosStatFile(fname, &sz, NULL, NULL) < 0) { taosCloseFile(&pFile); taosMemoryFree(fname); return -1; diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 252a0642fa..7c58431b57 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -332,8 +332,12 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con void* pBody = POINTER_SHIFT(pReader->pHead->head.body, sizeof(SMsgHead)); int32_t len = pReader->pHead->head.bodyLen - sizeof(SMsgHead); - extractDelDataBlock(pBody, len, ver, (SStreamRefDataBlock**)pItem); - tqDebug("s-task:%s delete msg extract from WAL, len:%d, ver:%"PRId64, id, len, ver); + code = extractDelDataBlock(pBody, len, ver, (SStreamRefDataBlock**)pItem); + if (code != TSDB_CODE_SUCCESS) { + tqError("s-task:%s extract delete msg from WAL failed, code:%s", id, tstrerror(code)); + } else { + tqDebug("s-task:%s delete msg extract from WAL, len:%d, ver:%"PRId64, id, len, ver); + } } else { ASSERT(0); } @@ -1088,6 +1092,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { if(ret != TDB_CODE_SUCCESS) { tqError("qGetTableList in tqUpdateTbUidList error:%d handle %s consumer:0x%" PRIx64, ret, pTqHandle->subKey, pTqHandle->consumerId); taosArrayDestroy(list); + taosHashCancelIterate(pTq->pHandle, pIter); return ret; } tqReaderSetTbUidList(pTqHandle->execHandle.pTqReader, list, NULL); diff --git a/source/dnode/vnode/src/tq/tqRestore.c b/source/dnode/vnode/src/tq/tqRestore.c index 3d9a91899c..d363031db1 100644 --- a/source/dnode/vnode/src/tq/tqRestore.c +++ b/source/dnode/vnode/src/tq/tqRestore.c @@ -210,13 +210,23 @@ int32_t doSetOffsetForWalReader(SStreamTask *pTask, int32_t vgId) { } static void checkForFillHistoryVerRange(SStreamTask* pTask, int64_t ver) { - if ((pTask->info.fillHistory == 1) && ver > pTask->dataRange.range.maxVer) { - qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64 - ", not scan wal anymore, set the transfer state flag", - pTask->id.idStr, ver, pTask->dataRange.range.maxVer); - pTask->status.transferState = true; + const char* id = pTask->id.idStr; + int64_t maxVer = pTask->dataRange.range.maxVer; - /*int32_t code = */streamSchedExec(pTask); + if ((pTask->info.fillHistory == 1) && ver > pTask->dataRange.range.maxVer) { + if (!pTask->status.appendTranstateBlock) { + qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64 + ", not scan wal anymore, add transfer-state block into inputQ", + id, ver, maxVer); + + double el = (taosGetTimestampMs() - pTask->tsInfo.step2Start) / 1000.0; + qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, el); + appendTranstateIntoInputQ(pTask); + /*int32_t code = */streamSchedExec(pTask); + } else { + qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64 ", not scan wal", + id, ver, maxVer); + } } } @@ -262,7 +272,7 @@ int32_t createStreamTaskRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) { continue; } - if ((pTask->info.fillHistory == 1) && pTask->status.transferState) { + if ((pTask->info.fillHistory == 1) && pTask->status.appendTranstateBlock) { ASSERT(status == TASK_STATUS__NORMAL); // the maximum version of data in the WAL has reached already, the step2 is done tqDebug("s-task:%s fill-history reach the maximum ver:%" PRId64 ", not scan wal anymore", pTask->id.idStr, diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c 
b/source/dnode/vnode/src/tsdb/tsdbFS.c index ec116c717e..c0c74d6b87 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS.c @@ -176,7 +176,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // SDelFile if (pTsdb->fs.pDelFile) { tsdbDelFileName(pTsdb, pTsdb->fs.pDelFile, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -195,7 +195,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // head ========= tsdbHeadFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pHeadF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -206,7 +206,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // data ========= tsdbDataFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pDataF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -221,7 +221,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // sma ============= tsdbSmaFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pSmaF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -237,7 +237,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // stt =========== for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) { tsdbSttFileName(pTsdb, pSet->diskId, pSet->fid, pSet->aSttF[iStt], fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 89b7d019ae..4d3b53bc5a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -14,6 +14,7 @@ */ #include "tsdb.h" +#include "vndCos.h" // =============== PAGE-WISE FILE =============== int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **ppFD) { @@ -34,9 +35,24 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p pFD->flag = flag; pFD->pFD = taosOpenFile(path, flag); if (pFD->pFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - taosMemoryFree(pFD); - goto _exit; + const char *object_name = taosDirEntryBaseName((char *)path); + long s3_size = s3Size(object_name); + if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3_size > 0) { + s3EvictCache(path, s3_size); + s3Get(object_name, path); + + pFD->pFD = taosOpenFile(path, flag); + + if (pFD->pFD == NULL) { + code = TAOS_SYSTEM_ERROR(errno); + taosMemoryFree(pFD); + goto _exit; + } + } else { + code = TAOS_SYSTEM_ERROR(errno); + taosMemoryFree(pFD); + goto _exit; + } } pFD->szPage = szPage; pFD->pgno = 0; @@ -50,7 +66,7 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p // not check file size when reading data files. 
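  // taosStatFile() takes a new fourth out-parameter (last access time) in this change; callers
  // that do not need it pass NULL, while s3EvictCache() in vnodeCos.c uses it to order cached
  // .data files by atime before eviction.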
if (flag != TD_FILE_READ) { - if (taosStatFile(path, &pFD->szFile, NULL) < 0) { + if (taosStatFile(path, &pFD->szFile, NULL, NULL) < 0) { code = TAOS_SYSTEM_ERROR(errno); taosMemoryFree(pFD->pBuf); taosCloseFile(&pFD->pFD); diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c index a4d5715083..267e8b4117 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRetention.c +++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c @@ -15,6 +15,7 @@ #include "tsdb.h" #include "tsdbFS2.h" +#include "vndCos.h" typedef struct { STsdb *tsdb; @@ -41,6 +42,28 @@ static int32_t tsdbDoRemoveFileObject(SRTNer *rtner, const STFileObj *fobj) { return TARRAY2_APPEND(rtner->fopArr, op); } +static int32_t tsdbRemoveFileObjectS3(SRTNer *rtner, const STFileObj *fobj) { + int32_t code = 0, lino = 0; + + STFileOp op = { + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + const char *object_name = taosDirEntryBaseName((char *)fobj->fname); + s3DeleteObjects(&object_name, 1); + +_exit: + if (code) { + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + } + return code; +} + static int32_t tsdbDoCopyFile(SRTNer *rtner, const STFileObj *from, const STFile *to) { int32_t code = 0; int32_t lino = 0; @@ -76,6 +99,34 @@ _exit: return code; } +static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile *to) { + int32_t code = 0; + int32_t lino = 0; + + char fname[TSDB_FILENAME_LEN]; + TdFilePtr fdFrom = NULL; + TdFilePtr fdTo = NULL; + + tsdbTFileName(rtner->tsdb, to, fname); + + fdFrom = taosOpenFile(from->fname, TD_FILE_READ); + if (fdFrom == NULL) code = terrno; + TSDB_CHECK_CODE(code, lino, _exit); + + char *object_name = taosDirEntryBaseName(fname); + code = s3PutObjectFromFile(from->fname, object_name); + TSDB_CHECK_CODE(code, lino, _exit); + + taosCloseFile(&fdFrom); + +_exit: + if (code) { + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + taosCloseFile(&fdFrom); + } + return code; +} + static int32_t tsdbDoMigrateFileObj(SRTNer *rtner, const STFileObj *fobj, const SDiskID *did) { int32_t code = 0; int32_t lino = 0; @@ -123,6 +174,53 @@ _exit: return code; } +static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const SDiskID *did) { + int32_t code = 0; + int32_t lino = 0; + STFileOp op = {0}; + + // remove old + op = (STFileOp){ + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + // create new + op = (STFileOp){ + .optype = TSDB_FOP_CREATE, + .fid = fobj->f->fid, + .nf = + { + .type = fobj->f->type, + .did = did[0], + .fid = fobj->f->fid, + .cid = fobj->f->cid, + .size = fobj->f->size, + .stt[0] = + { + .level = fobj->f->stt[0].level, + }, + }, + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + // do copy the file + code = tsdbCopyFileS3(rtner, fobj, &op.nf); + TSDB_CHECK_CODE(code, lino, _exit); + +_exit: + if (code) { + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + } + return code; +} + typedef struct { STsdb *tsdb; int32_t sync; @@ -201,8 +299,14 @@ static int32_t tsdbDoRetention2(void *arg) { for (int32_t ftype = 0; (ftype < TSDB_FTYPE_MAX) && (fobj = rtner->ctx->fset->farr[ftype], 1); ++ftype) { if (fobj == NULL) continue; - code = tsdbDoRemoveFileObject(rtner, fobj); - TSDB_CHECK_CODE(code, lino, _exit); + int32_t nlevel = 
tfsGetLevel(rtner->tsdb->pVnode->pTfs); + if (tsS3Enabled && nlevel > 1 && TSDB_FTYPE_DATA == ftype && fobj->f->did.level == nlevel - 1) { + code = tsdbRemoveFileObjectS3(rtner, fobj); + TSDB_CHECK_CODE(code, lino, _exit); + } else { + code = tsdbDoRemoveFileObject(rtner, fobj); + TSDB_CHECK_CODE(code, lino, _exit); + } } SSttLvl *lvl; @@ -228,8 +332,15 @@ static int32_t tsdbDoRetention2(void *arg) { if (fobj == NULL) continue; if (fobj->f->did.level == did.level) continue; - code = tsdbDoMigrateFileObj(rtner, fobj, &did); - TSDB_CHECK_CODE(code, lino, _exit); + + int32_t nlevel = tfsGetLevel(rtner->tsdb->pVnode->pTfs); + if (tsS3Enabled && nlevel > 1 && TSDB_FTYPE_DATA == ftype && did.level == nlevel - 1) { + code = tsdbMigrateDataFileS3(rtner, fobj, &did); + TSDB_CHECK_CODE(code, lino, _exit); + } else { + code = tsdbDoMigrateFileObj(rtner, fobj, &did); + TSDB_CHECK_CODE(code, lino, _exit); + } } // stt @@ -281,4 +392,4 @@ int32_t tsdbRetention(STsdb *tsdb, int64_t now, int32_t sync) { tsdbFreeRtnArg(arg); } return code; -} \ No newline at end of file +} diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index bdcf4a87c1..f547119f49 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -392,6 +392,9 @@ static int32_t tsdbSnapReadTombData(STsdbSnapReader* reader, uint8_t** data) { code = tTombBlockPut(reader->tombBlock, record); TSDB_CHECK_CODE(code, lino, _exit); + code = tsdbIterMergerNext(reader->tombIterMerger); + TSDB_CHECK_CODE(code, lino, _exit); + if (TOMB_BLOCK_SIZE(reader->tombBlock) >= 81920) { break; } diff --git a/source/dnode/vnode/src/tsdb/tsdbWrite.c b/source/dnode/vnode/src/tsdb/tsdbWrite.c index 2dbac956ed..6e89b47adc 100644 --- a/source/dnode/vnode/src/tsdb/tsdbWrite.c +++ b/source/dnode/vnode/src/tsdb/tsdbWrite.c @@ -76,7 +76,7 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq2 *pMsg) { int32_t code = 0; STsdbKeepCfg *pCfg = &pTsdb->keepCfg; TSKEY now = taosGetTimestamp(pCfg->precision); - TSKEY minKey = now - tsTickPerMin[pCfg->precision] * pCfg->keep2; + TSKEY minKey = now - tsTickPerMin[pCfg->precision] * pCfg->keep1; TSKEY maxKey = tsMaxKeyByPrecision[pCfg->precision]; int32_t size = taosArrayGetSize(pMsg->aSubmitTbData); @@ -107,4 +107,4 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq2 *pMsg) { _exit: return code; -} \ No newline at end of file +} diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c new file mode 100644 index 0000000000..4c76538eb2 --- /dev/null +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -0,0 +1,323 @@ +#define ALLOW_FORBID_FUNC + +#include "vndCos.h" + +extern char tsS3Endpoint[]; +extern char tsS3AccessKeyId[]; +extern char tsS3AccessKeySecret[]; +extern char tsS3BucketName[]; +extern char tsS3AppId[]; + +#ifdef USE_COS +#include "cos_api.h" +#include "cos_http_io.h" +#include "cos_log.h" + +int32_t s3Init() { + if (cos_http_io_initialize(NULL, 0) != COSE_OK) { + return -1; + } + + // set log level, default COS_LOG_WARN + cos_log_set_level(COS_LOG_WARN); + + // set log output, default stderr + cos_log_set_output(NULL); + + return 0; +} + +void s3CleanUp() { cos_http_io_deinitialize(); } + +static void log_status(cos_status_t *s) { + cos_warn_log("status->code: %d", s->code); + if (s->error_code) cos_warn_log("status->error_code: %s", s->error_code); + if (s->error_msg) cos_warn_log("status->error_msg: %s", s->error_msg); + if (s->req_id) cos_warn_log("status->req_id: %s", 
s->req_id); +} + +static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) { + options->config = cos_config_create(options->pool); + + cos_config_t *config = options->config; + + cos_str_set(&config->endpoint, tsS3Endpoint); + cos_str_set(&config->access_key_id, tsS3AccessKeyId); + cos_str_set(&config->access_key_secret, tsS3AccessKeySecret); + cos_str_set(&config->appid, tsS3AppId); + + config->is_cname = is_cname; + + options->ctl = cos_http_controller_create(options->pool, 0); +} + +int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) { + int32_t code = 0; + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket, object, file; + cos_table_t *resp_headers; + int traffic_limit = 0; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_table_t *headers = NULL; + if (traffic_limit) { + // 限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + } + cos_str_set(&bucket, tsS3BucketName); + cos_str_set(&file, file_str); + cos_str_set(&object, object_str); + s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers); + log_status(s); + + cos_pool_destroy(p); + + if (s->code != 200) { + return code = s->code; + } + + return code; +} + +void s3DeleteObjects(const char *object_name[], int nobject) { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_list_t object_list; + cos_list_t deleted_object_list; + int is_quiet = COS_TRUE; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_str_set(&bucket, tsS3BucketName); + + cos_list_init(&object_list); + cos_list_init(&deleted_object_list); + + for (int i = 0; i < nobject; ++i) { + cos_object_key_t *content = cos_create_cos_object_key(p); + cos_str_set(&content->key, object_name[i]); + cos_list_add_tail(&content->node, &object_list); + } + + cos_status_t *s = cos_delete_objects(options, &bucket, &object_list, is_quiet, &resp_headers, &deleted_object_list); + log_status(s); + + cos_pool_destroy(p); + + if (cos_status_is_ok(s)) { + cos_warn_log("delete objects succeeded\n"); + } else { + cos_warn_log("delete objects failed\n"); + } +} + +bool s3Exists(const char *object_name) { + bool ret = false; + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_object_exist_status_e object_exist; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_str_set(&bucket, tsS3BucketName); + cos_str_set(&object, object_name); + + s = cos_check_object_exist(options, &bucket, &object, headers, &object_exist, &resp_headers); + if (object_exist == COS_OBJECT_NON_EXIST) { + cos_warn_log("object: %.*s non exist.\n", object.len, object.data); + } else if (object_exist == COS_OBJECT_EXIST) { + ret = true; + cos_warn_log("object: %.*s exist.\n", object.len, object.data); + } else { + cos_warn_log("object: %.*s unknown status.\n", object.len, object.data); + log_status(s); + } + + cos_pool_destroy(p); + + return ret; +} + +bool s3Get(const char *object_name, const char 
*path) {
+  bool                   ret = false;
+  cos_pool_t            *p = NULL;
+  int                    is_cname = 0;
+  cos_status_t          *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t           bucket;
+  cos_string_t           object;
+  cos_string_t           file;
+  cos_table_t           *resp_headers = NULL;
+  cos_table_t           *headers = NULL;
+  int                    traffic_limit = 0;
+
+  // create a memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  s3InitRequestOptions(options, is_cname);
+  cos_str_set(&bucket, tsS3BucketName);
+  if (traffic_limit) {
+    // the traffic limit ranges from 819200 to 838860800, i.e. 100KB/s - 100MB/s; values outside it get a 400 error
+    headers = cos_table_make(p, 1);
+    cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
+  }
+
+  // download the object
+  cos_str_set(&file, path);
+  cos_str_set(&object, object_name);
+  s = cos_get_object_to_file(options, &bucket, &object, headers, NULL, &file, &resp_headers);
+  if (cos_status_is_ok(s)) {
+    ret = true;
+    cos_warn_log("get object succeeded\n");
+  } else {
+    cos_warn_log("get object failed\n");
+  }
+
+  // destroy the memory pool
+  cos_pool_destroy(p);
+
+  return ret;
+}
+
+typedef struct {
+  int64_t size;
+  int32_t atime;
+  char    name[TSDB_FILENAME_LEN];
+} SEvictFile;
+
+static int32_t evictFileCompareAsce(const void *pLeft, const void *pRight) {
+  SEvictFile *lhs = (SEvictFile *)pLeft;
+  SEvictFile *rhs = (SEvictFile *)pRight;
+  return lhs->atime < rhs->atime ? -1 : 1;
+}
+
+void s3EvictCache(const char *path, long object_size) {
+  SDiskSize disk_size = {0};
+  char      dir_name[TSDB_FILENAME_LEN] = "\0";
+
+  tstrncpy(dir_name, path, TSDB_FILENAME_LEN);
+  taosDirName(dir_name);
+
+  if (taosGetDiskSize((char *)dir_name, &disk_size) < 0) {
+    terrno = TAOS_SYSTEM_ERROR(errno);
+    vError("failed to get disk:%s size since %s", path, terrstr());
+    return;
+  }
+
+  if (object_size >= disk_size.avail - (1 << 30)) {
+    // evict too old files
+    // 1, list data files' atime under dir(path)
+    tdbDirPtr pDir = taosOpenDir(dir_name);
+    if (pDir == NULL) {
+      terrno = TAOS_SYSTEM_ERROR(errno);
+      vError("failed to open %s since %s", dir_name, terrstr());
+      return;
+    }
+    SArray        *evict_files = taosArrayInit(16, sizeof(SEvictFile));
+    tdbDirEntryPtr pDirEntry;
+    while ((pDirEntry = taosReadDir(pDir)) != NULL) {
+      char  *name = taosGetDirEntryName(pDirEntry);
+      size_t name_len = strlen(name);
+      // guard the suffix check: entries like "." and ".." are shorter than ".data"
+      if (name_len >= 5 && !strncmp(name + name_len - 5, ".data", 5)) {
+        SEvictFile e_file = {0};
+        snprintf(e_file.name, sizeof(e_file.name), "%s/%s", dir_name, name);
+
+        taosStatFile(e_file.name, &e_file.size, NULL, &e_file.atime);
+
+        taosArrayPush(evict_files, &e_file);
+      }
+    }
+    taosCloseDir(&pDir);
+
+    // 2, sort by atime
+    taosArraySort(evict_files, evictFileCompareAsce);
+
+    // 3, remove files ascendingly until we get enough object_size space
+    long   evict_size = 0;
+    size_t ef_size = TARRAY_SIZE(evict_files);
+    for (size_t i = 0; i < ef_size; ++i) {
+      SEvictFile *evict_file = taosArrayGet(evict_files, i);
+      taosRemoveFile(evict_file->name);
+      evict_size += evict_file->size;
+      if (evict_size >= object_size) {
+        break;
+      }
+    }
+
+    taosArrayDestroy(evict_files);
+  }
+}
+
+long s3Size(const char *object_name) {
+  long size = 0;
+
+  cos_pool_t            *p = NULL;
+  int                    is_cname = 0;
+  cos_status_t          *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t           bucket;
+  cos_string_t           object;
+  cos_table_t           *resp_headers = NULL;
+
+  // create a memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  s3InitRequestOptions(options, is_cname);
+  cos_str_set(&bucket, tsS3BucketName);
+
+  // fetch the object metadata
+  cos_str_set(&object, object_name);
+  s = cos_head_object(options, &bucket, &object, NULL, &resp_headers);
+  // print_headers(resp_headers);
+  if (cos_status_is_ok(s)) {
+    char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH);
+    if (content_length_str != NULL) {
+      size = atol(content_length_str);
+    }
+    cos_warn_log("head object succeeded: %ld\n", size);
+  } else {
+    cos_warn_log("head object failed\n");
+  }
+
+  // destroy the memory pool
+  cos_pool_destroy(p);
+
+  return size;
+}
+
+#else
+
+int32_t s3Init() { return 0; }
+void    s3CleanUp() {}
+int32_t s3PutObjectFromFile(const char *file, const char *object) { return 0; }
+void    s3DeleteObjects(const char *object_name[], int nobject) {}
+bool    s3Exists(const char *object_name) { return false; }
+bool    s3Get(const char *object_name, const char *path) { return false; }
+void    s3EvictCache(const char *path, long object_size) {}
+long    s3Size(const char *object_name) { return 0; }
+
+#endif
diff --git a/source/dnode/vnode/src/vnd/vnodeInitApi.c b/source/dnode/vnode/src/vnd/vnodeInitApi.c
index 5c8d563d73..dca8dd271c 100644
--- a/source/dnode/vnode/src/vnd/vnodeInitApi.c
+++ b/source/dnode/vnode/src/vnd/vnodeInitApi.c
@@ -96,6 +96,10 @@ void initMetadataAPI(SStoreMeta* pMeta) {

   pMeta->metaGetCachedTbGroup = metaGetCachedTbGroup;
   pMeta->metaPutTbGroupToCache = metaPutTbGroupToCache;
+
+  pMeta->openCtbCursor = metaOpenCtbCursor;
+  pMeta->closeCtbCursor = metaCloseCtbCursor;
+  pMeta->ctbCursorNext = metaCtbCursorNext;
 }

 void initTqAPI(SStoreTqReader* pTq) {
diff --git a/source/dnode/vnode/src/vnd/vnodeModule.c b/source/dnode/vnode/src/vnd/vnodeModule.c
index 74a8d14a86..6ccce5c9d7 100644
--- a/source/dnode/vnode/src/vnd/vnodeModule.c
+++ b/source/dnode/vnode/src/vnd/vnodeModule.c
@@ -14,6 +14,7 @@
  */

 #include "vnd.h"
+#include "vndCos.h"

 typedef struct SVnodeTask SVnodeTask;
 struct SVnodeTask {
@@ -81,6 +82,9 @@ int vnodeInit(int nthreads) {
   if (tqInit() < 0) {
     return -1;
   }
+  if (s3Init() < 0) {
+    return -1;
+  }

   return 0;
 }
@@ -112,6 +116,7 @@ void vnodeCleanup() {
   walCleanUp();
   tqCleanUp();
   smaCleanUp();
+  s3CleanUp();
 }

 int vnodeScheduleTaskEx(int tpid, int (*execute)(void*), void* arg) {
diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c
index 51f4cee40c..48f8ec021d 100644
--- a/source/dnode/vnode/src/vnd/vnodeQuery.c
+++ b/source/dnode/vnode/src/vnd/vnodeQuery.c
@@ -440,7 +440,7 @@ int32_t vnodeGetTableList(void* pVnode, int8_t type, SArray* pList) {
 }

 int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list) {
-  SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, uid, 1);
+  SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, uid, 1);

   while (1) {
     tb_uid_t id = metaCtbCursorNext(pCur);
@@ -462,7 +462,7 @@ int32_t vnodeGetCtbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bo

 int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list) {
   SVnode      *pVnodeObj = pVnode;
-  SMCtbCursor *pCur = metaOpenCtbCursor(pVnodeObj->pMeta, suid, 1);
+  SMCtbCursor *pCur = metaOpenCtbCursor(pVnodeObj, suid, 1);

   while (1) {
     tb_uid_t id = metaCtbCursorNext(pCur);
@@ -521,7 +521,7 @@ int32_t vnodeGetStbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bo
 }

 int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) {
-  SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, suid, 0);
+  SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, suid, 0);
   if (!pCur) {
     return TSDB_CODE_FAILED;
   }
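Review note: the eight functions above are the whole S3/COS surface the vnode consumes. A minimal round-trip sketch of the intended call order, assuming a reachable endpoint/bucket in the tsS3* globals and a hypothetical local file name (illustrative only, not part of the patch):

    if (s3Init() == 0) {
      if (s3PutObjectFromFile("/tmp/v2f1980.data", "v2f1980.data") == 0) {
        long size = s3Size("v2f1980.data");  // HEAD request; 0 means the lookup failed
        if (size > 0 && s3Get("v2f1980.data", "/tmp/v2f1980.data.copy")) {
          const char *objs[] = {"v2f1980.data"};
          s3DeleteObjects(objs, 1);
        }
      }
      s3CleanUp();
    }

diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c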
index 743470aac8..f75c779f4b 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -661,8 +661,6 @@ int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) return tqProcessTaskRetrieveRsp(pVnode->pTq, pMsg); case TDMT_VND_STREAM_SCAN_HISTORY: return tqProcessTaskScanHistory(pVnode->pTq, pMsg); - case TDMT_STREAM_TRANSFER_STATE: - return tqProcessTaskTransferStateReq(pVnode->pTq, pMsg); case TDMT_STREAM_SCAN_HISTORY_FINISH: return tqProcessTaskScanHistoryFinishReq(pVnode->pTq, pMsg); case TDMT_STREAM_SCAN_HISTORY_FINISH_RSP: diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 444da63a56..991e2013f5 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -625,7 +625,7 @@ void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STableCfg* } } - if (nSma < pCfg->numOfColumns) { + if (nSma < pCfg->numOfColumns && nSma > 0) { bool smaOn = false; *len += sprintf(buf + VARSTR_HEADER_SIZE + *len, " SMA("); for (int32_t i = 0; i < pCfg->numOfColumns; ++i) { diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index e917de33dd..e167b31ef8 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -291,17 +291,17 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i switch (pNode->type) { case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: { STagScanPhysiNode *pTagScanNode = (STagScanPhysiNode *)pNode; - EXPLAIN_ROW_NEW(level, EXPLAIN_TAG_SCAN_FORMAT, pTagScanNode->tableName.tname); + EXPLAIN_ROW_NEW(level, EXPLAIN_TAG_SCAN_FORMAT, pTagScanNode->scan.tableName.tname); EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); if (pResNode->pExecInfo) { QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); } - if (pTagScanNode->pScanPseudoCols) { - EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->pScanPseudoCols->length); + if (pTagScanNode->scan.pScanPseudoCols) { + EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->scan.pScanPseudoCols->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); } - EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->scan.node.pOutputDataBlockDesc->totalRowSize); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); @@ -309,11 +309,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i if (verbose) { EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, - nodesGetOutputNumFromSlotList(pTagScanNode->node.pOutputDataBlockDesc->pSlots)); + nodesGetOutputNumFromSlotList(pTagScanNode->scan.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); - EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->outputRowSize); - EXPLAIN_ROW_APPEND_LIMIT(pTagScanNode->node.pLimit); - EXPLAIN_ROW_APPEND_SLIMIT(pTagScanNode->node.pSlimit); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->scan.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pTagScanNode->scan.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pTagScanNode->scan.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); diff --git a/source/libs/executor/inc/executil.h 
b/source/libs/executor/inc/executil.h
index 33c9d845b9..f273f63770 100644
--- a/source/libs/executor/inc/executil.h
+++ b/source/libs/executor/inc/executil.h
@@ -190,4 +190,6 @@ void    printDataBlock(SSDataBlock* pBlock, const char* flag);
 void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order);
 void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery);

+SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode,
+                                        SStorageAPI* pStorageAPI);
 #endif  // TDENGINE_EXECUTIL_H
diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h
index fbca5e29f9..dad15dc6bc 100644
--- a/source/libs/executor/inc/executorInt.h
+++ b/source/libs/executor/inc/executorInt.h
@@ -251,6 +251,12 @@ typedef struct STableMergeScanInfo {
   SSortExecInfo   sortExecInfo;
 } STableMergeScanInfo;

+typedef struct STagScanFilterContext {
+  SHashObj* colHash;
+  int32_t   index;
+  SArray*   cInfoList;
+} STagScanFilterContext;
+
 typedef struct STagScanInfo {
   SColumnInfo*    pCols;
   SSDataBlock*    pRes;
@@ -259,6 +265,14 @@ typedef struct STagScanInfo {
   SLimitNode*     pSlimit;
   SReadHandle     readHandle;
   STableListInfo* pTableListInfo;
+  uint64_t        suid;
+  void*           pCtbCursor;
+  SNode*          pTagCond;
+  SNode*          pTagIndexCond;
+  STagScanFilterContext filterCtx;
+  SArray*         aUidTags;    // SArray<STUidTagInfo>
+  SArray*         aFilterIdxs; // SArray<int32_t>
+  SStorageAPI*    pStorageAPI;
 } STagScanInfo;

 typedef enum EStreamScanMode {
diff --git a/source/libs/executor/inc/operator.h b/source/libs/executor/inc/operator.h
index e6c3405d7f..38cefc1cc5 100644
--- a/source/libs/executor/inc/operator.h
+++ b/source/libs/executor/inc/operator.h
@@ -81,7 +81,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
 SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle,
                                                 STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);

-SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, SExecTaskInfo* pTaskInfo);

 SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode,
                                               const char* pUser, SExecTaskInfo* pTaskInfo);
diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h
index 7241b015a0..0742b9ba4c 100644
--- a/source/libs/executor/inc/querytask.h
+++ b/source/libs/executor/inc/querytask.h
@@ -99,7 +99,7 @@ struct SExecTaskInfo {
 void    buildTaskId(uint64_t taskId, uint64_t queryId, char* dst);
 SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model, SStorageAPI* pAPI);
 void    doDestroyTask(SExecTaskInfo* pTaskInfo);
-bool    isTaskKilled(SExecTaskInfo* pTaskInfo);
+bool    isTaskKilled(void* pTaskInfo);
 void    setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode);
 void    setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
 int32_t createExecTaskInfo(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
diff --git a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h
index 57c8bce275..3180173ca7 100644
--- a/source/libs/executor/inc/tsort.h
+++ b/source/libs/executor/inc/tsort.h
@@ -191,7 +191,8 @@ int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols);
bool tsortIsClosed(SSortHandle* pHandle); void tsortSetClosed(SSortHandle* pHandle); -void setSingleTableMerge(SSortHandle* pHandle); +void tsortSetSingleTableMerge(SSortHandle* pHandle); +void tsortSetAbortCheckFn(SSortHandle* pHandle, bool (*checkFn)(void* param), void* param); #ifdef __cplusplus } diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index aa0c7945b0..4b3e223f38 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -47,8 +47,6 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* p static int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo, uint8_t* digest, const char* idstr, SStorageAPI* pStorageAPI); -static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, - SStorageAPI* pStorageAPI); static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; } static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->offset; } @@ -846,7 +844,7 @@ static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, S return -1; } -static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, +SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, SStorageAPI* pStorageAPI) { SSDataBlock* pResBlock = createDataBlock(); if (pResBlock == NULL) { @@ -1677,6 +1675,7 @@ SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) { .intervalUnit = pTableScanNode->intervalUnit, .slidingUnit = pTableScanNode->slidingUnit, .offset = pTableScanNode->offset, + .precision = pTableScanNode->scan.node.pOutputDataBlockDesc->precision, }; return interval; diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index a6059c7c42..0ad2b9c116 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -589,6 +589,10 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo int64_t st = taosGetTimestampUs(); int32_t blockIndex = 0; + int32_t rowsThreshold = pTaskInfo->pSubplan->rowsThreshold; + if (!pTaskInfo->pSubplan->dynamicRowThreshold || 4096 <= pTaskInfo->pSubplan->rowsThreshold) { + rowsThreshold = 4096; + } while ((pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot)) != NULL) { SSDataBlock* p = NULL; if (blockIndex >= taosArrayGetSize(pTaskInfo->pResultBlockList)) { @@ -606,10 +610,13 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo ASSERT(p->info.rows > 0); taosArrayPush(pResList, &p); - if (current >= 4096) { + if (current >= rowsThreshold) { break; } } + if (pTaskInfo->pSubplan->dynamicRowThreshold) { + pTaskInfo->pSubplan->rowsThreshold -= current; + } *hasMore = (pRes != NULL); uint64_t el = (taosGetTimestampUs() - st); diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 8ddcc8fd15..7f0c5baa36 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -370,17 +370,18 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR STableCountScanPhysiNode* pTblCountScanNode = (STableCountScanPhysiNode*)pPhyNode; pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if 
(QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { - STagScanPhysiNode* pScanPhyNode = (STagScanPhysiNode*)pPhyNode; + STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); - int32_t code = createScanTableListInfo(pScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, - pTagIndexCond, pTaskInfo); - if (code != TSDB_CODE_SUCCESS) { - pTaskInfo->code = code; - qError("failed to getTableList, code: %s", tstrerror(code)); - return NULL; + if (!pTagScanPhyNode->onlyMetaCtbIdx) { + int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, + pTagIndexCond, pTaskInfo); + if (code != TSDB_CODE_SUCCESS) { + pTaskInfo->code = code; + qError("failed to getTableList, code: %s", tstrerror(code)); + return NULL; + } } - - pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTaskInfo); + pOperator = createTagScanOperatorInfo(pHandle, pTagScanPhyNode, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN == type) { SBlockDistScanPhysiNode* pBlockNode = (SBlockDistScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); diff --git a/source/libs/executor/src/querytask.c b/source/libs/executor/src/querytask.c index 22d171e74a..980ef1a61a 100644 --- a/source/libs/executor/src/querytask.c +++ b/source/libs/executor/src/querytask.c @@ -59,7 +59,7 @@ SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOP return pTaskInfo; } -bool isTaskKilled(SExecTaskInfo* pTaskInfo) { return (0 != pTaskInfo->code); } +bool isTaskKilled(void* pTaskInfo) { return (0 != ((SExecTaskInfo*)pTaskInfo)->code); } void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode) { pTaskInfo->code = rspCode; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 9a836caa8c..09b45d1c9d 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2688,7 +2688,236 @@ static void doTagScanOneTable(SOperatorInfo* pOperator, const SSDataBlock* pRes, } } -static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { +static void tagScanFreeUidTag(void* p) { + STUidTagInfo* pInfo = p; + if (pInfo->pTagVal != NULL) { + taosMemoryFree(pInfo->pTagVal); + } +} + +static int32_t tagScanCreateResultData(SDataType* pType, int32_t numOfRows, SScalarParam* pParam) { + SColumnInfoData* pColumnData = taosMemoryCalloc(1, sizeof(SColumnInfoData)); + if (pColumnData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return terrno; + } + + pColumnData->info.type = pType->type; + pColumnData->info.bytes = pType->bytes; + pColumnData->info.scale = pType->scale; + pColumnData->info.precision = pType->precision; + + int32_t code = colInfoDataEnsureCapacity(pColumnData, numOfRows, true); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + taosMemoryFree(pColumnData); + return terrno; + } + + pParam->columnData = pColumnData; + pParam->colAlloced = true; + return TSDB_CODE_SUCCESS; +} + +static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) { + SColumnNode* pSColumnNode = NULL; + if (QUERY_NODE_COLUMN == nodeType((*pNode))) { + pSColumnNode = *(SColumnNode**)pNode; + } else if (QUERY_NODE_FUNCTION == nodeType((*pNode))) { + SFunctionNode* pFuncNode = *(SFunctionNode**)(pNode); + if (pFuncNode->funcType == FUNCTION_TYPE_TBNAME) { + pSColumnNode = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == pSColumnNode) { + 
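// nodesMakeNode failed: abort the whole rewrite walk.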
+        return DEAL_RES_ERROR;
+      }
+      pSColumnNode->colId = -1;
+      pSColumnNode->colType = COLUMN_TYPE_TBNAME;
+      pSColumnNode->node.resType.type = TSDB_DATA_TYPE_VARCHAR;
+      pSColumnNode->node.resType.bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE;
+      nodesDestroyNode(*pNode);
+      *pNode = (SNode*)pSColumnNode;
+    } else {
+      return DEAL_RES_CONTINUE;
+    }
+  } else {
+    return DEAL_RES_CONTINUE;
+  }
+
+  STagScanFilterContext* pCtx = (STagScanFilterContext*)pContext;
+  void* data = taosHashGet(pCtx->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId));
+  if (!data) {
+    taosHashPut(pCtx->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId), pNode, sizeof((*pNode)));
+    pSColumnNode->slotId = pCtx->index++;
+    SColumnInfo cInfo = {.colId = pSColumnNode->colId,
+                         .type = pSColumnNode->node.resType.type,
+                         .bytes = pSColumnNode->node.resType.bytes};
+    taosArrayPush(pCtx->cInfoList, &cInfo);
+  } else {
+    SColumnNode* col = *(SColumnNode**)data;
+    pSColumnNode->slotId = col->slotId;
+  }
+
+  return DEAL_RES_CONTINUE;
+}
+
+static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode,
+                                   SStorageAPI* pAPI, STagScanInfo* pInfo) {
+  int32_t numOfTables = taosArrayGetSize(aUidTags);
+
+  // build a temporary block with one row of tag values per uid, then evaluate the tag condition over it
+  SSDataBlock* pResBlock = createTagValBlockForFilter(pInfo->filterCtx.cInfoList, numOfTables, aUidTags, pVnode, pAPI);
+
+  SArray* pBlockList = taosArrayInit(1, POINTER_BYTES);
+  taosArrayPush(pBlockList, &pResBlock);
+  SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};
+
+  SScalarParam output = {0};
+  tagScanCreateResultData(&type, numOfTables, &output);
+
+  scalarCalculate(pTagCond, pBlockList, &output);
+
+  bool* result = (bool*)output.columnData->pData;
+  for (int32_t i = 0; i < numOfTables; ++i) {
+    if (result[i]) {
+      taosArrayPush(aFilterIdxs, &i);
+    }
+  }
+
+  colDataDestroy(output.columnData);
+  taosMemoryFreeClear(output.columnData);
+
+  blockDataDestroy(pResBlock);
+  taosArrayDestroy(pBlockList);
+}
+
+static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo* pExprInfo, SColumnInfoData* pColInfo,
+                                      int rowIndex, const SStorageAPI* pAPI, void* pVnode) {
+  if (fmIsScanPseudoColumnFunc(pExprInfo->pExpr->_function.functionId)) {  // tbname
+    char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
+    if (pUidTagInfo->name != NULL) {
+      STR_TO_VARSTR(str, pUidTagInfo->name);
+    } else {  // name is not retrieved during filter
+      pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str);
+    }
+
+    colDataSetVal(pColInfo, rowIndex, str, false);
+  } else {
+    STagVal tagVal = {0};
+    tagVal.cid = pExprInfo->base.pParam[0].pCol->colId;
+    if (pUidTagInfo->pTagVal == NULL) {
+      colDataSetNULL(pColInfo, rowIndex);
+    } else {
+      const char* p = pAPI->metaFn.extractTagVal(pUidTagInfo->pTagVal, pColInfo->info.type, &tagVal);
+
+      if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) {
+        colDataSetNULL(pColInfo, rowIndex);
+      } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) {
+        colDataSetVal(pColInfo, rowIndex, p, false);
+      } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
+        char* tmp = taosMemoryMalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1);
+        varDataSetLen(tmp, tagVal.nData);
+        memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData);
+        colDataSetVal(pColInfo, rowIndex, tmp, false);
+        taosMemoryFree(tmp);
+      } else {
+        colDataSetVal(pColInfo, rowIndex, (const char*)&tagVal.i64, false);
+      }
+    }
+  }
+}
+
+static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags,
+                                      SArray* aFilterIdxs, bool ignoreFilterIdx, SStorageAPI* pAPI) {
+  STagScanInfo* pInfo = pOperator->info;
+  SExprInfo*    pExprInfo = &pOperator->exprSupp.pExprInfo[0];
+  if (!ignoreFilterIdx) {
+    size_t szTables = taosArrayGetSize(aFilterIdxs);
+    for (int i = 0; i < szTables; ++i) {
+      int32_t       idx = *(int32_t*)taosArrayGet(aFilterIdxs, i);
+      STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, idx);
+      for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
+        SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId);
+        tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode);
+      }
+    }
+  } else {
+    size_t szTables = taosArrayGetSize(aUidTags);
+    for (int i = 0; i < szTables; ++i) {
+      STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, i);
+      for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
+        SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId);
+        tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode);
+      }
+    }
+  }
+  return 0;
+}
+
+static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) {
+  if (pOperator->status == OP_EXEC_DONE) {
+    return NULL;
+  }
+  SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+  SStorageAPI*   pAPI = &pTaskInfo->storageAPI;
+
+  STagScanInfo* pInfo = pOperator->info;
+  SSDataBlock*  pRes = pInfo->pRes;
+  blockDataCleanup(pRes);
+
+  if (pInfo->pCtbCursor == NULL) {
+    pInfo->pCtbCursor = pAPI->metaFn.openCtbCursor(pInfo->readHandle.vnode, pInfo->suid, 1);
+  }
+
+  SArray* aUidTags = pInfo->aUidTags;
+  SArray* aFilterIdxs = pInfo->aFilterIdxs;
+  int32_t count = 0;
+
+  while (1) {
+    taosArrayClearEx(aUidTags, tagScanFreeUidTag);
+    taosArrayClear(aFilterIdxs);
+
+    int32_t numTables = 0;
+    while (numTables < pOperator->resultInfo.capacity) {
+      SMCtbCursor* pCur = pInfo->pCtbCursor;
+      tb_uid_t     uid = pAPI->metaFn.ctbCursorNext(pInfo->pCtbCursor);
+      if (uid == 0) {
+        break;
+      }
+      // take a private copy of the tag blob out of the cursor buffer
+      STUidTagInfo info = {.uid = uid};
+      info.pTagVal = taosMemoryMalloc(pCur->vLen);
+      memcpy(info.pTagVal, pCur->pVal, pCur->vLen);
+      taosArrayPush(aUidTags, &info);
+      ++numTables;
+    }
+
+    if (numTables == 0) {
+      break;
+    }
+
+    bool ignoreFilterIdx = (pInfo->pTagCond == NULL);
+    if (!ignoreFilterIdx) {
+      tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI, pInfo);
+    }
+
+    tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, ignoreFilterIdx, pAPI);
+
+    count = ignoreFilterIdx ? taosArrayGetSize(aUidTags) : taosArrayGetSize(aFilterIdxs);
+
+    if (count != 0) {
+      break;
+    }
+  }
+
+  pRes->info.rows = count;
+  pOperator->resultInfo.totalRows += count;
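+  // rows survive either because there is no tag condition (ignoreFilterIdx) or
+  // because the scalar filter kept at least one uid; an empty block means EOF
+  return (pRes->info.rows == 0) ?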
NULL : pInfo->pRes;
+}
+
+static SSDataBlock* doTagScanFromMetaEntry(SOperatorInfo* pOperator) {
   if (pOperator->status == OP_EXEC_DONE) {
     return NULL;
   }
@@ -2746,14 +2975,23 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {

 static void destroyTagScanOperatorInfo(void* param) {
   STagScanInfo* pInfo = (STagScanInfo*)param;
+  // the ctb cursor survives across next() calls; release it if the scan ended early
+  if (pInfo->pCtbCursor != NULL) {
+    pInfo->pStorageAPI->metaFn.closeCtbCursor(pInfo->pCtbCursor, 1);
+  }
+  taosHashCleanup(pInfo->filterCtx.colHash);
+  taosArrayDestroy(pInfo->filterCtx.cInfoList);
+  taosArrayDestroy(pInfo->aFilterIdxs);
+  taosArrayDestroyEx(pInfo->aUidTags, tagScanFreeUidTag);
+
   pInfo->pRes = blockDataDestroy(pInfo->pRes);
   taosArrayDestroy(pInfo->matchInfo.pList);
   pInfo->pTableListInfo = tableListDestroy(pInfo->pTableListInfo);
   taosMemoryFreeClear(param);
 }

-SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode,
-                                         STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) {
+SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pTagScanNode,
+                                         STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, SExecTaskInfo* pTaskInfo) {
+  SScanPhysiNode* pPhyNode = (SScanPhysiNode*)pTagScanNode;
   STagScanInfo*  pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo));
   SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
   if (pInfo == NULL || pOperator == NULL) {
@@ -2775,6 +3013,11 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
     goto _error;
   }

+  pInfo->pTagCond = pTagCond;
+  pInfo->pTagIndexCond = pTagIndexCond;
+  pInfo->suid = pPhyNode->suid;
+  pInfo->pStorageAPI = &pTaskInfo->storageAPI;
+
   pInfo->pTableListInfo = pTableListInfo;
   pInfo->pRes = createDataBlockFromDescNode(pDescNode);
   pInfo->readHandle = *pReadHandle;
@@ -2786,8 +3029,18 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
   initResultSizeInfo(&pOperator->resultInfo, 4096);
   blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);

+  if (pTagScanNode->onlyMetaCtbIdx) {
+    // ctb-index mode: pre-size the uid/tag buffers and map every tag column the
+    // condition references onto a slot of the temporary filter block
+    pInfo->aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo));
+    pInfo->aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t));
+    pInfo->filterCtx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
+    pInfo->filterCtx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo));
+    if (pInfo->pTagCond != NULL) {
+      // the rewrite walks the local pTagCond pointer; pInfo->pTagCond was captured above
+      nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&pInfo->filterCtx);
+    }
+  }
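+
+  // choose the scan routine: the ctb-index path streams uids straight from meta,
+  // the meta-entry path walks the table list prepared by the planner
+  __optr_fn_t tagScanNextFn = (pTagScanNode->onlyMetaCtbIdx) ?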
doTagScanFromCtbIdx : doTagScanFromMetaEntry; pOperator->fpSet = - createOperatorFpSet(optrDummyOpenFn, doTagScan, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); + createOperatorFpSet(optrDummyOpenFn, tagScanNextFn, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); return pOperator; @@ -2928,8 +3181,9 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_BLOCK_TS_MERGE, pInfo->bufPageSize, numOfBufPage, pInfo->pSortInputBlock, pTaskInfo->id.str, 0, 0, 0); - + tsortSetMergeLimit(pInfo->pSortHandle, mergeLimit); + tsortSetAbortCheckFn(pInfo->pSortHandle, isTaskKilled, pOperator->pTaskInfo); } tsortSetFetchRawDataFp(pInfo->pSortHandle, getBlockForTableMergeScan, NULL, NULL); @@ -2949,7 +3203,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { int32_t code = TSDB_CODE_SUCCESS; if (numOfTable == 1) { - setSingleTableMerge(pInfo->pSortHandle); + tsortSetSingleTableMerge(pInfo->pSortHandle); } else { code = tsortOpen(pInfo->pSortHandle); } diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 019a9f4d2c..9163495bfa 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -1600,6 +1600,11 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { SSysTableScanInfo* pInfo = pOperator->info; char dbName[TSDB_DB_NAME_LEN] = {0}; + if (isTaskKilled(pOperator->pTaskInfo)) { + setOperatorCompleted(pOperator); + return NULL; + } + blockDataCleanup(pInfo->pRes); const char* name = tNameGetTableName(&pInfo->name); diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index b019985645..c9824ff2d6 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -848,6 +848,10 @@ static void doHandleTimeslice(SOperatorInfo* pOperator, SSDataBlock* pBlock) { bool ignoreNull = getIgoreNullRes(pSup); int32_t order = TSDB_ORDER_ASC; + if (checkWindowBoundReached(pSliceInfo)) { + return; + } + int32_t code = initKeeperInfo(pSliceInfo, pBlock, &pOperator->exprSupp); if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, code); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 6884855ece..b50b8ca2c8 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1595,6 +1595,10 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) { colDataDestroy(&pInfo->twAggSup.timeWindowData); pInfo->groupResInfo.pRows = taosArrayDestroy(pInfo->groupResInfo.pRows); cleanupExprSupp(&pInfo->scalarSupp); + tSimpleHashCleanup(pInfo->pUpdatedMap); + pInfo->pUpdatedMap = NULL; + pInfo->pUpdated = taosArrayDestroy(pInfo->pUpdated); + taosMemoryFreeClear(param); } diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 1891e93c61..6c4a780dfb 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -71,12 +71,20 @@ struct SSortHandle { SMultiwayMergeTreeInfo* pMergeTree; bool singleTableMerge; + + bool (*abortCheckFn)(void* param); + void* abortCheckParam; }; -void setSingleTableMerge(SSortHandle* pHandle) { +void tsortSetSingleTableMerge(SSortHandle* pHandle) { pHandle->singleTableMerge = true; } +void tsortSetAbortCheckFn(SSortHandle *pHandle, bool (*checkFn)(void *), void* 
param) { + pHandle->abortCheckFn = checkFn; + pHandle->abortCheckParam = param; +} + static int32_t msortComparFn(const void* pLeft, const void* pRight, void* param); // | offset[0] | offset[1] |....| nullbitmap | data |...| @@ -726,11 +734,10 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t)); while (1) { - if (tsortIsClosed(pHandle)) { + if (tsortIsClosed(pHandle) || (pHandle->abortCheckFn && pHandle->abortCheckFn(pHandle->abortCheckParam))) { code = terrno = TSDB_CODE_TSC_QUERY_CANCELLED; return code; } - SSDataBlock* pDataBlock = getSortedBlockDataInner(pHandle, &pHandle->cmpParam, numOfRows); if (pDataBlock == NULL) { break; diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h index c3afc30a7b..d2f19ed2eb 100644 --- a/source/libs/function/inc/builtinsimpl.h +++ b/source/libs/function/inc/builtinsimpl.h @@ -127,7 +127,10 @@ int32_t derivativeFunction(SqlFunctionCtx* pCtx); bool getIrateFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); bool irateFuncSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo); int32_t irateFunction(SqlFunctionCtx* pCtx); +int32_t irateFunctionMerge(SqlFunctionCtx* pCtx); int32_t irateFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +int32_t iratePartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +int32_t getIrateInfoSize(); int32_t cachedLastRowFunction(SqlFunctionCtx* pCtx); diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index cee4000155..9a301b7f1c 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1567,6 +1567,45 @@ static int32_t translateIrate(SFunctionNode* pFunc, char* pErrBuf, int32_t len) return TSDB_CODE_SUCCESS; } +static int32_t translateIrateImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isPartial) { + uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + if (isPartial) { + if (3 != LIST_LENGTH(pFunc->pParameterList)) { + return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); + } + if (!IS_NUMERIC_TYPE(colType)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + pFunc->node.resType = (SDataType){.bytes = getIrateInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; + } else { + if (1 != LIST_LENGTH(pFunc->pParameterList)) { + return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); + } + if (TSDB_DATA_TYPE_BINARY != colType) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; + + // add database precision as param + uint8_t dbPrec = pFunc->node.resType.precision; + int32_t code = addDbPrecisonParam(&pFunc->pParameterList, dbPrec); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } + + + return TSDB_CODE_SUCCESS; +} + +static int32_t translateIratePartial(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + return translateIrateImpl(pFunc, pErrBuf, len, true); +} + +static int32_t translateIrateMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + return translateIrateImpl(pFunc, pErrBuf, len, false); +} + static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); uint8_t dbPrec = pFunc->node.resType.precision; @@ -2604,6 +2643,31 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { 
.initFunc = irateFuncSetup, .processFunc = irateFunction, .sprocessFunc = irateScalarFunction, + .finalizeFunc = irateFinalize, + .pPartialFunc = "_irate_partial", + .pMergeFunc = "_irate_merge" + }, + { + .name = "_irate_partial", + .type = FUNCTION_TYPE_IRATE_PARTIAL, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | + FUNC_MGT_FORBID_SYSTABLE_FUNC, + .translateFunc = translateIratePartial, + .getEnvFunc = getIrateFuncEnv, + .initFunc = irateFuncSetup, + .processFunc = irateFunction, + .sprocessFunc = irateScalarFunction, + .finalizeFunc = iratePartialFinalize + }, + { + .name = "_irate_merge", + .type = FUNCTION_TYPE_IRATE_MERGE, + .classification = FUNC_MGT_AGG_FUNC, + .translateFunc = translateIrateMerge, + .getEnvFunc = getIrateFuncEnv, + .initFunc = irateFuncSetup, + .processFunc = irateFunctionMerge, + .sprocessFunc = irateScalarFunction, .finalizeFunc = irateFinalize }, { diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index fad8c9ca5b..bcbb3af950 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -5768,6 +5768,8 @@ int32_t derivativeFunction(SqlFunctionCtx* pCtx) { return TSDB_CODE_SUCCESS; } +int32_t getIrateInfoSize() { return (int32_t)sizeof(SRateInfo); } + bool getIrateFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv) { pEnv->calcMemSize = sizeof(SRateInfo); return true; @@ -5817,6 +5819,7 @@ int32_t irateFunction(SqlFunctionCtx* pCtx) { if (INT64_MIN == pRateInfo->lastKey) { pRateInfo->lastValue = v; pRateInfo->lastKey = tsList[i]; + pRateInfo->hasResult = 1; continue; } @@ -5868,6 +5871,99 @@ static double doCalcRate(const SRateInfo* pRateInfo, double tickPerSec) { return (duration > 0) ? ((double)diff) / (duration / tickPerSec) : 0.0; } +static void irateTransferInfoImpl(TSKEY inputKey, SRateInfo* pInput, SRateInfo* pOutput, bool isFirstKey) { + if (inputKey > pOutput->lastKey) { + pOutput->firstKey = pOutput->lastKey; + pOutput->firstValue = pOutput->lastValue; + + pOutput->lastKey = isFirstKey ? pInput->firstKey : pInput->lastKey; + pOutput->lastValue = isFirstKey ? pInput->firstValue : pInput->lastValue; + } else if ((inputKey < pOutput->lastKey) && (inputKey > pOutput->firstKey)) { + pOutput->firstKey = isFirstKey ? pInput->firstKey : pInput->lastKey; + pOutput->firstValue = isFirstKey ? 
pInput->firstValue : pInput->lastValue; + } else { + // inputKey < pOutput->firstKey + } +} + +static void irateCopyInfo(SRateInfo* pInput, SRateInfo* pOutput) { + pOutput->firstKey = pInput->firstKey; + pOutput->lastKey = pInput->lastKey; + + pOutput->firstValue = pInput->firstValue; + pOutput->lastValue = pInput->lastValue; +} + +static int32_t irateTransferInfo(SRateInfo* pInput, SRateInfo* pOutput) { + if ((pInput->firstKey != INT64_MIN && (pInput->firstKey == pOutput->firstKey || pInput->firstKey == pOutput->lastKey)) || + (pInput->lastKey != INT64_MIN && (pInput->lastKey == pOutput->firstKey || pInput->lastKey == pOutput->lastKey))) { + return TSDB_CODE_FUNC_DUP_TIMESTAMP; + } + + if (pOutput->hasResult == 0) { + irateCopyInfo(pInput, pOutput); + pOutput->hasResult = pInput->hasResult; + return TSDB_CODE_SUCCESS; + } + + if (pInput->firstKey != INT64_MIN) { + irateTransferInfoImpl(pInput->firstKey, pInput, pOutput, true); + } + + if (pInput->lastKey != INT64_MIN) { + irateTransferInfoImpl(pInput->lastKey, pInput, pOutput, false); + } + + pOutput->hasResult = pInput->hasResult; + return TSDB_CODE_SUCCESS; +} + +int32_t irateFunctionMerge(SqlFunctionCtx* pCtx) { + SInputColumnInfoData* pInput = &pCtx->input; + SColumnInfoData* pCol = pInput->pData[0]; + if (pCol->info.type != TSDB_DATA_TYPE_BINARY) { + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; + } + + SRateInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + + int32_t start = pInput->startRowIndex; + for (int32_t i = start; i < start + pInput->numOfRows; ++i) { + char* data = colDataGetData(pCol, i); + SRateInfo* pInputInfo = (SRateInfo*)varDataVal(data); + if (pInputInfo->hasResult) { + int32_t code = irateTransferInfo(pInputInfo, pInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } + } + + if (pInfo->hasResult) { + GET_RES_INFO(pCtx)->numOfRes = 1; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t iratePartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { + SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); + SRateInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + int32_t resultBytes = getIrateInfoSize(); + char* res = taosMemoryCalloc(resultBytes + VARSTR_HEADER_SIZE, sizeof(char)); + + memcpy(varDataVal(res), pInfo, resultBytes); + varDataSetLen(res, resultBytes); + + int32_t slotId = pCtx->pExpr->base.resSchema.slotId; + SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); + + colDataSetVal(pCol, pBlock->info.rows, res, false); + + taosMemoryFree(res); + return pResInfo->numOfRes; +} + int32_t irateFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { int32_t slotId = pCtx->pExpr->base.resSchema.slotId; SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index 7371017111..575bce09bb 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -378,9 +378,9 @@ int32_t udfdInitializePythonPlugin(SUdfScriptPlugin *plugin) { "pyUdfDestroy", "pyUdfScalarProc", "pyUdfAggStart", "pyUdfAggFinish", "pyUdfAggProc", "pyUdfAggMerge"}; void **funcs[UDFD_MAX_PLUGIN_FUNCS] = { - (void **)&plugin->openFunc, (void **)&plugin->closeFunc, (void **)&plugin->udfInitFunc, - (void **)&plugin->udfDestroyFunc, (void **)&plugin->udfScalarProcFunc, (void **)&plugin->udfAggStartFunc, - (void **)&plugin->udfAggFinishFunc, (void **)&plugin->udfAggProcFunc, (void **)&plugin->udfAggMergeFunc}; + (void **)&plugin->openFunc, (void **)&plugin->closeFunc, (void **)&plugin->udfInitFunc, + (void 
**)&plugin->udfDestroyFunc, (void **)&plugin->udfScalarProcFunc, (void **)&plugin->udfAggStartFunc, + (void **)&plugin->udfAggFinishFunc, (void **)&plugin->udfAggProcFunc, (void **)&plugin->udfAggMergeFunc}; int32_t err = udfdLoadSharedLib(plugin->libPath, &plugin->lib, funcName, funcs, UDFD_MAX_PLUGIN_FUNCS); if (err != 0) { fnError("can not load python plugin. lib path %s", plugin->libPath); @@ -848,7 +848,7 @@ int32_t udfdSaveFuncBodyToFile(SFuncInfo *pFuncInfo, SUdf *udf) { char path[PATH_MAX] = {0}; udfdGetFuncBodyPath(udf, path); - bool fileExist = !(taosStatFile(path, NULL, NULL) < 0); + bool fileExist = !(taosStatFile(path, NULL, NULL, NULL) < 0); if (fileExist) { strncpy(udf->path, path, PATH_MAX); fnInfo("udfd func body file. reuse existing file %s", path); diff --git a/source/libs/index/src/indexFstFile.c b/source/libs/index/src/indexFstFile.c index e18d0bbad3..43f15f5196 100644 --- a/source/libs/index/src/indexFstFile.c +++ b/source/libs/index/src/indexFstFile.c @@ -162,7 +162,7 @@ static FORCE_INLINE int idxFileCtxGetSize(IFileCtx* ctx) { return ctx->offset; } else { int64_t file_size = 0; - taosStatFile(ctx->file.buf, &file_size, NULL); + taosStatFile(ctx->file.buf, &file_size, NULL, NULL); return (int)file_size; } } @@ -199,7 +199,7 @@ IFileCtx* idxFileCtxCreate(WriterType type, const char* path, bool readOnly, int code = taosFtruncateFile(ctx->file.pFile, 0); UNUSED(code); - code = taosStatFile(path, &ctx->file.size, NULL); + code = taosStatFile(path, &ctx->file.size, NULL, NULL); UNUSED(code); ctx->file.wBufOffset = 0; diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index f5eacf0bd5..d3cbaac5e1 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -564,7 +564,9 @@ static int32_t physiScanCopy(const SScanPhysiNode* pSrc, SScanPhysiNode* pDst) { } static int32_t physiTagScanCopy(const STagScanPhysiNode* pSrc, STagScanPhysiNode* pDst) { - return physiScanCopy(pSrc, pDst); + COPY_BASE_OBJECT_FIELD(scan, physiScanCopy); + COPY_SCALAR_FIELD(onlyMetaCtbIdx); + return TSDB_CODE_SUCCESS; } static int32_t physiTableScanCopy(const STableScanPhysiNode* pSrc, STableScanPhysiNode* pDst) { diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index f25616065e..1e4eb510d9 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -197,8 +197,10 @@ const char* nodesNodeName(ENodeType type) { return "ShowDnodesStmt"; case QUERY_NODE_SHOW_MNODES_STMT: return "ShowMnodesStmt"; +/* case QUERY_NODE_SHOW_MODULES_STMT: return "ShowModulesStmt"; +*/ case QUERY_NODE_SHOW_QNODES_STMT: return "ShowQnodesStmt"; case QUERY_NODE_SHOW_SNODES_STMT: @@ -660,6 +662,7 @@ static const char* jkScanLogicPlanDynamicScanFuncs = "DynamicScanFuncs"; static const char* jkScanLogicPlanDataRequired = "DataRequired"; static const char* jkScanLogicPlanTagCond = "TagCond"; static const char* jkScanLogicPlanGroupTags = "GroupTags"; +static const char* jkScanLogicPlanOnlyMetaCtbIdx = "OnlyMetaCtbIdx"; static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) { const SScanLogicNode* pNode = (const SScanLogicNode*)pObj; @@ -701,7 +704,9 @@ static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = nodeListToJson(pJson, jkScanLogicPlanGroupTags, pNode->pGroupTags); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkScanLogicPlanOnlyMetaCtbIdx, pNode->onlyMetaCtbIdx); + 
} return code; } @@ -746,7 +751,10 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeList(pJson, jkScanLogicPlanGroupTags, &pNode->pGroupTags); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkScanLogicPlanOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); + } + return code; } @@ -1562,7 +1570,7 @@ static const char* jkScanPhysiPlanTableName = "TableName"; static const char* jkScanPhysiPlanGroupOrderScan = "GroupOrderScan"; static int32_t physiScanNodeToJson(const void* pObj, SJson* pJson) { - const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + const SScanPhysiNode* pNode = (const SScanPhysiNode*)pObj; int32_t code = physicPlanNodeToJson(pObj, pJson); if (TSDB_CODE_SUCCESS == code) { @@ -1591,7 +1599,7 @@ static int32_t physiScanNodeToJson(const void* pObj, SJson* pJson) { } static int32_t jsonToPhysiScanNode(const SJson* pJson, void* pObj) { - STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + SScanPhysiNode* pNode = (SScanPhysiNode*)pObj; int32_t code = jsonToPhysicPlanNode(pJson, pObj); if (TSDB_CODE_SUCCESS == code) { @@ -1619,6 +1627,30 @@ static int32_t jsonToPhysiScanNode(const SJson* pJson, void* pObj) { return code; } +static const char* jkTagScanPhysiOnlyMetaCtbIdx = "OnlyMetaCtbIdx"; + +static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) { + const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + + int32_t code = physiScanNodeToJson(pObj, pJson); + + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkTagScanPhysiOnlyMetaCtbIdx, pNode->onlyMetaCtbIdx); + } + return code; +} + +static int32_t jsonToPhysiTagScanNode(const SJson* pJson, void* pObj) { + STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + + int32_t code = jsonToPhysiScanNode(pJson, pObj); + + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkTagScanPhysiOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); + } + return code; +} + static const char* jkLastRowScanPhysiPlanGroupTags = "GroupTags"; static const char* jkLastRowScanPhysiPlanGroupSort = "GroupSort"; @@ -2814,6 +2846,8 @@ static const char* jkSubplanDataSink = "DataSink"; static const char* jkSubplanTagCond = "TagCond"; static const char* jkSubplanTagIndexCond = "TagIndexCond"; static const char* jkSubplanShowRewrite = "ShowRewrite"; +static const char* jkSubplanRowsThreshold = "RowThreshold"; +static const char* jkSubplanDynamicRowsThreshold = "DyRowThreshold"; static int32_t subplanToJson(const void* pObj, SJson* pJson) { const SSubplan* pNode = (const SSubplan*)pObj; @@ -2852,6 +2886,12 @@ static int32_t subplanToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkSubplanShowRewrite, pNode->showRewrite); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkSubplanRowsThreshold, pNode->rowsThreshold); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkSubplanDynamicRowsThreshold, pNode->dynamicRowThreshold); + } return code; } @@ -2893,6 +2933,12 @@ static int32_t jsonToSubplan(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkSubplanShowRewrite, &pNode->showRewrite); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkSubplanRowsThreshold, &pNode->rowsThreshold); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkSubplanDynamicRowsThreshold, &pNode->dynamicRowThreshold); + } return code; } @@ 
-6590,6 +6636,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { case QUERY_NODE_LOGIC_PLAN: return logicPlanToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + return physiTagScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: return physiScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: @@ -6908,6 +6955,7 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { case QUERY_NODE_LOGIC_PLAN: return jsonToLogicPlan(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + return jsonToPhysiTagScanNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: case QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN: return jsonToPhysiScanNode(pJson, pObj); diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 20e829766d..74d7aad6e8 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -2003,6 +2003,43 @@ static int32_t msgToPhysiScanNode(STlvDecoder* pDecoder, void* pObj) { return code; } +enum { + PHY_TAG_SCAN_CODE_SCAN = 1, + PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX +}; + +static int32_t physiTagScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { + const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + + int32_t code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN, physiScanNodeToMsg, &pNode->scan); + + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX, pNode->onlyMetaCtbIdx); + } + return code; +} + +static int32_t msgToPhysiTagScanNode(STlvDecoder* pDecoder, void* pObj) { + STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + + int32_t code = TSDB_CODE_SUCCESS; + STlv* pTlv = NULL; + tlvForEach(pDecoder, pTlv, code) { + switch (pTlv->type) { + case PHY_TAG_SCAN_CODE_SCAN: + code = tlvDecodeObjFromTlv(pTlv, msgToPhysiScanNode, &pNode->scan); + break; + case PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX: + code = tlvDecodeBool(pTlv, &pNode->onlyMetaCtbIdx); + break; + default: + break; + } + } + + return code; +} + enum { PHY_LAST_ROW_SCAN_CODE_SCAN = 1, PHY_LAST_ROW_SCAN_CODE_GROUP_TAGS, @@ -3538,6 +3575,12 @@ static int32_t subplanInlineToMsg(const void* pObj, STlvEncoder* pEncoder) { if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeValueBool(pEncoder, pNode->showRewrite); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeValueI32(pEncoder, pNode->rowsThreshold); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeValueBool(pEncoder, pNode->dynamicRowThreshold); + } return code; } @@ -3587,6 +3630,12 @@ static int32_t msgToSubplanInline(STlvDecoder* pDecoder, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tlvDecodeValueBool(pDecoder, &pNode->showRewrite); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvDecodeValueI32(pDecoder, &pNode->rowsThreshold); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvDecodeValueBool(pDecoder, &pNode->dynamicRowThreshold); + } return code; } @@ -3726,6 +3775,8 @@ static int32_t specificNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { code = caseWhenNodeToMsg(pObj, pEncoder); break; case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + code = physiTagScanNodeToMsg(pObj, pEncoder); + break; case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: code = physiScanNodeToMsg(pObj, pEncoder); break; @@ -3869,6 +3920,8 @@ static int32_t msgToSpecificNode(STlvDecoder* pDecoder, void* pObj) { code = msgToCaseWhenNode(pDecoder, pObj); break; case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + code = msgToPhysiTagScanNode(pDecoder, pObj); + break; case 
QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN:
       code = msgToPhysiScanNode(pDecoder, pObj);
       break;
diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c
index c8197721fb..75b63b9d34 100644
--- a/source/libs/nodes/src/nodesUtilFuncs.c
+++ b/source/libs/nodes/src/nodesUtilFuncs.c
@@ -406,7 +406,7 @@ SNode* nodesMakeNode(ENodeType type) {
       return makeNode(type, sizeof(SRevokeStmt));
     case QUERY_NODE_SHOW_DNODES_STMT:
     case QUERY_NODE_SHOW_MNODES_STMT:
-    case QUERY_NODE_SHOW_MODULES_STMT:
+//    case QUERY_NODE_SHOW_MODULES_STMT:
     case QUERY_NODE_SHOW_QNODES_STMT:
     case QUERY_NODE_SHOW_SNODES_STMT:
     case QUERY_NODE_SHOW_BNODES_STMT:
@@ -982,7 +982,7 @@ void nodesDestroyNode(SNode* pNode) {
       break;
     case QUERY_NODE_SHOW_DNODES_STMT:
     case QUERY_NODE_SHOW_MNODES_STMT:
-    case QUERY_NODE_SHOW_MODULES_STMT:
+//    case QUERY_NODE_SHOW_MODULES_STMT:
     case QUERY_NODE_SHOW_QNODES_STMT:
     case QUERY_NODE_SHOW_SNODES_STMT:
     case QUERY_NODE_SHOW_BNODES_STMT:
diff --git a/source/libs/nodes/test/nodesCloneTest.cpp b/source/libs/nodes/test/nodesCloneTest.cpp
index e1e99abab3..8b8893d317 100644
--- a/source/libs/nodes/test/nodesCloneTest.cpp
+++ b/source/libs/nodes/test/nodesCloneTest.cpp
@@ -199,9 +199,10 @@ TEST_F(NodesCloneTest, physiScan) {
     ASSERT_EQ(nodeType(pSrc), nodeType(pDst));
     STagScanPhysiNode* pSrcNode = (STagScanPhysiNode*)pSrc;
     STagScanPhysiNode* pDstNode = (STagScanPhysiNode*)pDst;
-    ASSERT_EQ(pSrcNode->uid, pDstNode->uid);
-    ASSERT_EQ(pSrcNode->suid, pDstNode->suid);
-    ASSERT_EQ(pSrcNode->tableType, pDstNode->tableType);
+    ASSERT_EQ(pSrcNode->scan.uid, pDstNode->scan.uid);
+    ASSERT_EQ(pSrcNode->scan.suid, pDstNode->scan.suid);
+    ASSERT_EQ(pSrcNode->scan.tableType, pDstNode->scan.tableType);
+    ASSERT_EQ(pSrcNode->onlyMetaCtbIdx, pDstNode->onlyMetaCtbIdx);
   });

  std::unique_ptr<SNode, void (*)(SNode*)> srcNode(nullptr, nodesDestroyNode);
diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c
index fdec9cba79..86b4566d37 100644
--- a/source/libs/parser/src/parAstParser.c
+++ b/source/libs/parser/src/parAstParser.c
@@ -690,8 +690,10 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
       return collectMetaKeyFromShowDnodes(pCxt, (SShowStmt*)pStmt);
     case QUERY_NODE_SHOW_MNODES_STMT:
       return collectMetaKeyFromShowMnodes(pCxt, (SShowStmt*)pStmt);
+/*
     case QUERY_NODE_SHOW_MODULES_STMT:
       return collectMetaKeyFromShowModules(pCxt, (SShowStmt*)pStmt);
+*/
     case QUERY_NODE_SHOW_QNODES_STMT:
       return collectMetaKeyFromShowQnodes(pCxt, (SShowStmt*)pStmt);
     case QUERY_NODE_SHOW_SNODES_STMT:
diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c
index 9b2ac662c8..6a26dcfa8b 100644
--- a/source/libs/parser/src/parAuthenticator.c
+++ b/source/libs/parser/src/parAuthenticator.c
@@ -263,7 +263,7 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
       return authAlterTable(pCxt, (SAlterTableStmt*)pStmt);
     case QUERY_NODE_SHOW_DNODES_STMT:
     case QUERY_NODE_SHOW_MNODES_STMT:
-    case QUERY_NODE_SHOW_MODULES_STMT:
+//    case QUERY_NODE_SHOW_MODULES_STMT:
     case QUERY_NODE_SHOW_QNODES_STMT:
     case QUERY_NODE_SHOW_SNODES_STMT:
     case QUERY_NODE_SHOW_BNODES_STMT:
diff --git a/source/libs/parser/src/parInsertSml.c b/source/libs/parser/src/parInsertSml.c
index 9bb559d441..917848e121 100644
--- a/source/libs/parser/src/parInsertSml.c
+++ b/source/libs/parser/src/parInsertSml.c
@@ -22,7 +22,7 @@ static void clearColValArray(SArray* pCols) {
   int32_t num = taosArrayGetSize(pCols);
   for (int32_t i = 0; i < num; ++i) {
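     SColVal* pCol = taosArrayGet(pCols, i);
+    // GEOMETRY values now own a heap copy (see smlBuildCol below), so free them like NCHAR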
diff --git a/source/libs/parser/src/parInsertSml.c b/source/libs/parser/src/parInsertSml.c index 9bb559d441..917848e121 100644 --- a/source/libs/parser/src/parInsertSml.c +++ b/source/libs/parser/src/parInsertSml.c @@ -22,7 +22,7 @@ static void clearColValArray(SArray* pCols) { int32_t num = taosArrayGetSize(pCols); for (int32_t i = 0; i < num; ++i) { SColVal* pCol = taosArrayGet(pCols, i); - if (TSDB_DATA_TYPE_NCHAR == pCol->type) { + if (TSDB_DATA_TYPE_NCHAR == pCol->type || TSDB_DATA_TYPE_GEOMETRY == pCol->type) { taosMemoryFreeClear(pCol->value.pData); } pCol->flag = CV_FLAG_NONE; @@ -240,6 +240,10 @@ int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* data, int32 - } else if (kv->type == TSDB_DATA_TYPE_BINARY || kv->type == TSDB_DATA_TYPE_VARBINARY || kv->type == TSDB_DATA_TYPE_GEOMETRY) { + } else if (kv->type == TSDB_DATA_TYPE_BINARY || kv->type == TSDB_DATA_TYPE_VARBINARY) { pVal->value.nData = kv->length; pVal->value.pData = (uint8_t*)kv->value; + } else if (kv->type == TSDB_DATA_TYPE_GEOMETRY) { + pVal->value.nData = kv->length; + pVal->value.pData = taosMemoryMalloc(kv->length); + memcpy(pVal->value.pData, (uint8_t*)kv->value, kv->length); } else { memcpy(&pVal->value.val, &(kv->value), kv->length); } @@ -367,6 +371,10 @@ int32_t smlBindData(SQuery* query, bool dataFormat, SArray* tags, SArray* colsSc - } else if (kv->type == TSDB_DATA_TYPE_BINARY || kv->type == TSDB_DATA_TYPE_VARBINARY || kv->type == TSDB_DATA_TYPE_GEOMETRY) { + } else if (kv->type == TSDB_DATA_TYPE_BINARY || kv->type == TSDB_DATA_TYPE_VARBINARY) { pVal->value.nData = kv->length; pVal->value.pData = (uint8_t*)kv->value; + } else if (kv->type == TSDB_DATA_TYPE_GEOMETRY) { + pVal->value.nData = kv->length; + pVal->value.pData = taosMemoryMalloc(kv->length); + memcpy(pVal->value.pData, (uint8_t*)kv->value, kv->length); } else { memcpy(&pVal->value.val, &(kv->value), kv->length); } diff --git a/source/libs/parser/src/parInsertUtil.c b/source/libs/parser/src/parInsertUtil.c index de7d154db6..33699ed857 100644 --- a/source/libs/parser/src/parInsertUtil.c +++ b/source/libs/parser/src/parInsertUtil.c @@ -333,7 +333,7 @@ int32_t insGetTableDataCxt(SHashObj* pHash, void* id, int32_t idLen, STableMeta* static void destroyColVal(void* p) { SColVal* pVal = p; - if (TSDB_DATA_TYPE_NCHAR == pVal->type) { + if (TSDB_DATA_TYPE_NCHAR == pVal->type || TSDB_DATA_TYPE_GEOMETRY == pVal->type) { taosMemoryFree(pVal->value.pData); } } diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 72d8799a85..0e8f29e051 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -31,8 +31,8 @@ #define SYSTABLE_SHOW_TYPE_OFFSET QUERY_NODE_SHOW_DNODES_STMT typedef struct SRewriteTbNameContext { - int32_t errCode; - char* pTbName; + int32_t errCode; + char* pTbName; } SRewriteTbNameContext; typedef struct STranslateContext { @@ -54,7 +54,7 @@ typedef struct STranslateContext { bool stableQuery; bool showRewrite; SNode* pPrevRoot; - SNode* pPostRoot; + SNode* pPostRoot; } STranslateContext; typedef struct SBuildTopicContext { @@ -92,6 +92,7 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { .numOfShowCols = 1, .pShowCols = {"*"} }, +/* { .showType = QUERY_NODE_SHOW_MODULES_STMT, .pDbName = TSDB_INFORMATION_SCHEMA_DB, @@ -99,6 +100,7 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { .numOfShowCols = 1, .pShowCols = {"*"} }, +*/ { .showType = QUERY_NODE_SHOW_QNODES_STMT, .pDbName = TSDB_INFORMATION_SCHEMA_DB, @@ -278,10 +280,11 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode); static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode); static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal); -static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt); -static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery); -static int32_t 
setQuery(STranslateContext* pCxt, SQuery* pQuery); -static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery); +static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt); +static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery); +static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery); +static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery); static bool afterGroupBy(ESqlClause clause) { return clause > SQL_CLAUSE_GROUP_BY; } @@ -772,7 +775,8 @@ static SNodeList* getProjectList(const SNode* pNode) { static bool isTimeLineQuery(SNode* pStmt) { if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { - return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode); + return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || + (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode); } else if (QUERY_NODE_SET_OPERATOR == nodeType(pStmt)) { return TIME_LINE_GLOBAL == ((SSetOperator*)pStmt)->timeLineResMode; } else { @@ -791,7 +795,7 @@ static bool isGlobalTimeLineQuery(SNode* pStmt) { } static bool isTimeLineAlignedQuery(SNode* pStmt) { - SSelectStmt *pSelect = (SSelectStmt *)pStmt; + SSelectStmt* pSelect = (SSelectStmt*)pStmt; if (isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return true; } @@ -801,7 +805,7 @@ static bool isTimeLineAlignedQuery(SNode* pStmt) { if (QUERY_NODE_SELECT_STMT != nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return false; } - SSelectStmt *pSub = (SSelectStmt *)((STempTableNode*)pSelect->pFromTable)->pSubquery; + SSelectStmt* pSub = (SSelectStmt*)((STempTableNode*)pSelect->pFromTable)->pSubquery; if (nodesListMatch(pSelect->pPartitionByList, pSub->pPartitionByList)) { return true; } @@ -822,18 +826,18 @@ static bool isPrimaryKeyImpl(SNode* pExpr) { return true; } } else if (QUERY_NODE_OPERATOR == nodeType(pExpr)) { - SOperatorNode* pOper = (SOperatorNode*)pExpr; - if (OP_TYPE_ADD != pOper->opType && OP_TYPE_SUB != pOper->opType) { - return false; - } - if (!isPrimaryKeyImpl(pOper->pLeft)) { - return false; - } - if (QUERY_NODE_VALUE != nodeType(pOper->pRight)) { - return false; - } - return true; + SOperatorNode* pOper = (SOperatorNode*)pExpr; + if (OP_TYPE_ADD != pOper->opType && OP_TYPE_SUB != pOper->opType) { + return false; } + if (!isPrimaryKeyImpl(pOper->pLeft)) { + return false; + } + if (QUERY_NODE_VALUE != nodeType(pOper->pRight)) { + return false; + } + return true; + } return false; } @@ -860,7 +864,7 @@ static void setColumnInfoBySchema(const SRealTableNode* pTable, const SSchema* p pCol->tableType = pTable->pMeta->tableType; pCol->colId = pColSchema->colId; pCol->colType = (tagFlag >= 0 ? 
COLUMN_TYPE_TAG : COLUMN_TYPE_COLUMN); - pCol->hasIndex = ((0 == tagFlag) || (pColSchema != NULL && IS_IDX_ON(pColSchema))); + pCol->hasIndex = (pColSchema != NULL && IS_IDX_ON(pColSchema)); pCol->node.resType.type = pColSchema->type; pCol->node.resType.bytes = pColSchema->bytes; if (TSDB_DATA_TYPE_TIMESTAMP == pCol->node.resType.type) { @@ -1406,7 +1410,7 @@ static bool isCountStar(SFunctionNode* pFunc) { } static int32_t rewriteCountStarAsCount1(STranslateContext* pCxt, SFunctionNode* pCount) { - int32_t code = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); if (NULL == pVal) { return TSDB_CODE_OUT_OF_MEMORY; @@ -1608,9 +1612,11 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } - if (pSelect->hasInterpFunc && (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { + if (pSelect->hasInterpFunc && + (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, - "%s ignoring null value options cannot be used when applying to multiple columns", pFunc->functionName); + "%s ignoring null value options cannot be used when applying to multiple columns", + pFunc->functionName); } if (NULL != pSelect->pWindow || NULL != pSelect->pGroupByList) { @@ -1648,7 +1654,8 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu } SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && - !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && !isTimeLineAlignedQuery(pCxt->pCurrStmt)) { + !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && + !isTimeLineAlignedQuery(pCxt->pCurrStmt)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, "%s function requires valid time series input", pFunc->functionName); } @@ -1718,8 +1725,8 @@ static int32_t translateForbidSysTableFunc(STranslateContext* pCxt, SFunctionNod return TSDB_CODE_SUCCESS; } - SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; - SNode* pTable = pSelect->pFromTable; + SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; + SNode* pTable = pSelect->pFromTable; if (NULL != pTable && QUERY_NODE_REAL_TABLE == nodeType(pTable) && TSDB_SYSTEM_TABLE == ((SRealTableNode*)pTable)->pMeta->tableType) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, pFunc->functionName); @@ -2308,7 +2315,8 @@ static EDealRes doCheckExprForGroupBy(SNode** pNode, void* pContext) { } } if (isScanPseudoColumnFunc(*pNode) || QUERY_NODE_COLUMN == nodeType(*pNode)) { - if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { + if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || + (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { return generateDealNodeErrMsg(pCxt, getGroupByErrorCode(pCxt), ((SExprNode*)(*pNode))->userAlias); } else { return rewriteColToSelectValFunc(pCxt, pNode); @@ -2403,14 +2411,14 @@ static int32_t checkHavingGroupBy(STranslateContext* pCxt, SSelectStmt* pSelect) if (NULL != pSelect->pHaving) { code = 
checkExprForGroupBy(pCxt, &pSelect->pHaving); } -/* - if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pProjectionList) { - code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); - } - if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { - code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pOrderByList); - } -*/ + /* + if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pProjectionList) { + code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); + } + if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { + code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pOrderByList); + } + */ return code; } @@ -2669,10 +2677,10 @@ static int32_t setTableCacheLastMode(STranslateContext* pCxt, SSelectStmt* pSele static EDealRes doTranslateTbName(SNode** pNode, void* pContext) { switch (nodeType(*pNode)) { case QUERY_NODE_FUNCTION: { - SFunctionNode *pFunc = (SFunctionNode *)*pNode; + SFunctionNode* pFunc = (SFunctionNode*)*pNode; if (FUNCTION_TYPE_TBNAME == pFunc->funcType) { - SRewriteTbNameContext *pCxt = (SRewriteTbNameContext*)pContext; - SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); + SRewriteTbNameContext* pCxt = (SRewriteTbNameContext*)pContext; + SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); if (NULL == pVal) { pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; return DEAL_RES_ERROR; @@ -2711,11 +2719,12 @@ static int32_t replaceTbName(STranslateContext* pCxt, SSelectStmt* pSelect) { } SRealTableNode* pTable = (SRealTableNode*)pSelect->pFromTable; - if (TSDB_CHILD_TABLE != pTable->pMeta->tableType && TSDB_NORMAL_TABLE != pTable->pMeta->tableType && TSDB_SYSTEM_TABLE != pTable->pMeta->tableType) { + if (TSDB_CHILD_TABLE != pTable->pMeta->tableType && TSDB_NORMAL_TABLE != pTable->pMeta->tableType && + TSDB_SYSTEM_TABLE != pTable->pMeta->tableType) { return TSDB_CODE_SUCCESS; } - SNode** pNode = NULL; + SNode** pNode = NULL; SRewriteTbNameContext pRewriteCxt = {0}; pRewriteCxt.pTbName = pTable->table.tableName; @@ -3122,7 +3131,8 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList code = scalarCalculateConstants(pCastFunc, &pCell->pNode); } if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) { - code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant"); + code = + generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant"); } else if (TSDB_CODE_SUCCESS != code) { code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch"); } @@ -3588,7 +3598,6 @@ static int32_t createDefaultEveryNode(STranslateContext* pCxt, SNode** pOutput) pEvery->isDuration = true; pEvery->literal = taosStrdup("1s"); - *pOutput = (SNode*)pEvery; return TSDB_CODE_SUCCESS; } @@ -3683,15 +3692,15 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) { static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelect) { pCxt->currClause = SQL_CLAUSE_PARTITION_BY; int32_t code = TSDB_CODE_SUCCESS; - + if (pSelect->pPartitionByList) { int8_t typeType = getTableTypeFromTableNode(pSelect->pFromTable); SNode* pPar = nodesListGetNode(pSelect->pPartitionByList, 0); - if (!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) && - 1 == pSelect->pPartitionByList->length && (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { + if 
(!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) && 1 == pSelect->pPartitionByList->length && + (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { pSelect->timeLineResMode = TIME_LINE_MULTI; } - + code = translateExprList(pCxt, pSelect->pPartitionByList); } if (TSDB_CODE_SUCCESS == code) { @@ -3955,9 +3964,9 @@ static int32_t translateSetOperProject(STranslateContext* pCxt, SSetOperator* pS } snprintf(pRightExpr->aliasName, sizeof(pRightExpr->aliasName), "%s", pLeftExpr->aliasName); SNode* pProj = createSetOperProject(pSetOperator->stmtName, pLeft); - if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight) - && ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID - && ((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { + if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight) && + ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID && + ((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { ((SColumnNode*)pProj)->colId = PRIMARYKEY_TIMESTAMP_COL_ID; } if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pSetOperator->pProjectionList, pProj)) { @@ -5741,7 +5750,6 @@ static int32_t translateRestoreDnode(STranslateContext* pCxt, SRestoreComponentN return buildCmdMsg(pCxt, TDMT_MND_RESTORE_DNODE, (FSerializeFunc)tSerializeSRestoreDnodeReq, &restoreReq); } - static int32_t getSmaIndexDstVgId(STranslateContext* pCxt, const char* pDbName, const char* pTableName, int32_t* pVgId) { SVgroupInfo vg = {0}; @@ -5869,7 +5877,7 @@ static int32_t checkCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pS } static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) { - int32_t code = checkCreateSmaIndex(pCxt, pStmt); + int32_t code = checkCreateSmaIndex(pCxt, pStmt); pStmt->pReq = taosMemoryCalloc(1, sizeof(SMCreateSmaReq)); if (pStmt->pReq == NULL) code = TSDB_CODE_OUT_OF_MEMORY; if (TSDB_CODE_SUCCESS == code) { @@ -5883,13 +5891,15 @@ int32_t createIntervalFromCreateSmaIndexStmt(SCreateIndexStmt* pStmt, SInterval* pInterval->interval = ((SValueNode*)pStmt->pOptions->pInterval)->datum.i; pInterval->intervalUnit = ((SValueNode*)pStmt->pOptions->pInterval)->unit; pInterval->offset = NULL != pStmt->pOptions->pOffset ? ((SValueNode*)pStmt->pOptions->pOffset)->datum.i : 0; - pInterval->sliding = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval; - pInterval->slidingUnit = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit; + pInterval->sliding = + NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval; + pInterval->slidingUnit = + NULL != pStmt->pOptions->pSliding ? 
((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit; pInterval->precision = pStmt->pOptions->tsPrecision; return TSDB_CODE_SUCCESS; } -int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void ** pResRow) { +int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) { int32_t code = TSDB_CODE_SUCCESS; SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pQuery->pRoot; int64_t lastTs = 0; @@ -6057,7 +6067,7 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS toName(pCxt->pParseCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name); tNameGetFullDbName(&name, pReq->subDbName); tNameExtractFullName(&name, pReq->subStbName); - if(pStmt->pQuery != NULL) { + if (pStmt->pQuery != NULL) { code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL); } } else if ('\0' != pStmt->subDbName[0]) { @@ -6112,11 +6122,12 @@ static EDealRes checkColumnTagsInCond(SNode* pNode, void* pContext) { addTagList(&pCxt->pTags, nodesCloneNode(pNode)); } } - + return DEAL_RES_CONTINUE; } -static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, SNodeList** ppProjection) { +static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, + SNodeList** ppProjection) { SBuildTopicContext colCxt = {.colExists = false, .colNotFound = false, .pMeta = pMeta, .pTags = NULL}; nodesWalkExprPostOrder(pStmt->pWhere, checkColumnTagsInCond, &colCxt); if (colCxt.colNotFound) { @@ -6126,18 +6137,18 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* nodesDestroyList(colCxt.pTags); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Columns are forbidden in where clause"); } - if (NULL == colCxt.pTags) { // put one column to select -// for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { - SSchema* column = &pMeta->schema[0]; - SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); - if (NULL == col) { - return TSDB_CODE_OUT_OF_MEMORY; - } - strcpy(col->colName, column->name); - strcpy(col->node.aliasName, col->colName); - strcpy(col->node.userAlias, col->colName); - addTagList(&colCxt.pTags, (SNode*)col); -// } + if (NULL == colCxt.pTags) { // put one column to select + // for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { + SSchema* column = &pMeta->schema[0]; + SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == col) { + return TSDB_CODE_OUT_OF_MEMORY; + } + strcpy(col->colName, column->name); + strcpy(col->node.aliasName, col->colName); + strcpy(col->node.userAlias, col->colName); + addTagList(&colCxt.pTags, (SNode*)col); + // } } *ppProjection = colCxt.pTags; @@ -6145,13 +6156,13 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* } static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt* pStmt, SNode** pSelect) { - SParseContext* pParCxt = pCxt->pParseCxt; - SRequestConnInfo connInfo = {.pTrans = pParCxt->pTransporter, - .requestId = pParCxt->requestId, + SParseContext* pParCxt = pCxt->pParseCxt; + SRequestConnInfo connInfo = {.pTrans = pParCxt->pTransporter, + .requestId = pParCxt->requestId, .requestObjRefId = pParCxt->requestRid, .mgmtEps = pParCxt->mgmtEpSet}; - SName name; - STableMeta* pMeta = NULL; + SName name; + STableMeta* pMeta = NULL; int32_t code = getTableMetaImpl(pCxt, toName(pParCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name), &pMeta); if 
(code) { taosMemoryFree(pMeta); @@ -6160,7 +6171,7 @@ static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt if (TSDB_SUPER_TABLE != pMeta->tableType) { taosMemoryFree(pMeta); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Only supertable table can be used"); - } + } SNodeList* pProjection = NULL; code = checkCollectTopicTags(pCxt, pStmt, pMeta, &pProjection); @@ -6558,7 +6569,8 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "SUBTABLE expression must be of VARCHAR type"); } - if (NULL != pSelect->pSubtable && 0 == LIST_LENGTH(pSelect->pPartitionByList) && subtableExprHasColumnOrPseudoColumn(pSelect->pSubtable)) { + if (NULL != pSelect->pSubtable && 0 == LIST_LENGTH(pSelect->pPartitionByList) && + subtableExprHasColumnOrPseudoColumn(pSelect->pSubtable)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "SUBTABLE expression must not has column when no partition by clause"); } @@ -6914,28 +6926,28 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta if (NULL == col) { return TSDB_CODE_OUT_OF_MEMORY; } - + strcpy(col->tableAlias, pTable); strcpy(col->colName, pMeta->schema[0].name); SNodeList* pParamterList = nodesMakeList(); if (NULL == pParamterList) { - nodesDestroyNode((SNode *)col); + nodesDestroyNode((SNode*)col); return TSDB_CODE_OUT_OF_MEMORY; } - - int32_t code = nodesListStrictAppend(pParamterList, (SNode *)col); + + int32_t code = nodesListStrictAppend(pParamterList, (SNode*)col); if (code) { - nodesDestroyNode((SNode *)col); + nodesDestroyNode((SNode*)col); nodesDestroyList(pParamterList); return code; } - + SNode* pFunc = (SNode*)createFunction("last", pParamterList); if (NULL == pFunc) { nodesDestroyList(pParamterList); return TSDB_CODE_OUT_OF_MEMORY; } - + SNodeList* pProjectionList = nodesMakeList(); if (NULL == pProjectionList) { nodesDestroyList(pParamterList); @@ -6947,8 +6959,8 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta nodesDestroyList(pProjectionList); return code; } - - code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt **)pQuery); + + code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt**)pQuery); if (code) { nodesDestroyList(pProjectionList); return code; @@ -6986,14 +6998,14 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt if (TSDB_CODE_SUCCESS == code && pStmt->pOptions->fillHistory) { SRealTableNode* pTable = (SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable); code = createLastTsSelectStmt(pTable->table.dbName, pTable->table.tableName, pTable->pMeta, &pStmt->pPrevQuery); -/* - if (TSDB_CODE_SUCCESS == code) { - STranslateContext cxt = {0}; - int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); - code = translateQuery(&cxt, pStmt->pPrevQuery); - destroyTranslateContext(&cxt); - } -*/ + /* + if (TSDB_CODE_SUCCESS == code) { + STranslateContext cxt = {0}; + int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); + code = translateQuery(&cxt, pStmt->pPrevQuery); + destroyTranslateContext(&cxt); + } + */ } taosMemoryFree(pMeta); return code; @@ -7088,7 +7100,7 @@ static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow)) { return 
code; } - + SIntervalWindowNode* pWindow = (SIntervalWindowNode*)pSelect->pWindow; pInterval->interval = ((SValueNode*)pWindow->pInterval)->datum.i; pInterval->intervalUnit = ((SValueNode*)pWindow->pInterval)->unit; @@ -7096,16 +7108,16 @@ static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* pInterval->sliding = (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->datum.i : pInterval->interval); pInterval->slidingUnit = (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->unit : pInterval->intervalUnit); - pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision; + pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision; return code; } int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) { SCreateStreamStmt* pStmt = (SCreateStreamStmt*)pQuery->pRoot; - STranslateContext cxt = {0}; - SInterval interval = {0}; - int64_t lastTs = 0; + STranslateContext cxt = {0}; + SInterval interval = {0}; + int64_t lastTs = 0; int32_t code = initTranslateContext(pParseCxt, NULL, &cxt); if (TSDB_CODE_SUCCESS == code) { @@ -7140,7 +7152,6 @@ int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void return code; } - static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pStmt) { SMDropStreamReq dropReq = {0}; SName name; @@ -7171,7 +7182,7 @@ static int32_t translateResumeStream(STranslateContext* pCxt, SResumeStreamStmt* static int32_t readFromFile(char* pName, int32_t* len, char** buf) { int64_t filesize = 0; - if (taosStatFile(pName, &filesize, NULL) < 0) { + if (taosStatFile(pName, &filesize, NULL, NULL) < 0) { return TAOS_SYSTEM_ERROR(errno); } @@ -7265,7 +7276,7 @@ static int32_t translateGrantTagCond(STranslateContext* pCxt, SGrantStmt* pStmt, } } - int32_t code = createRealTableForGrantTable(pStmt, &pTable); + int32_t code = createRealTableForGrantTable(pStmt, &pTable); if (TSDB_CODE_SUCCESS == code) { SName name; code = getTableMetaImpl(pCxt, toName(pCxt->pParseCxt->acctId, pTable->table.dbName, pTable->table.tableName, &name), @@ -7825,7 +7836,8 @@ static SNodeList* createProjectCols(int32_t ncols, const char* const pCols[]) { return pProjections; } -static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { +static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt) { SSelectStmt* pSelect = (SSelectStmt*)nodesMakeNode(QUERY_NODE_SELECT_STMT); if (NULL == pSelect) { return TSDB_CODE_OUT_OF_MEMORY; @@ -7848,9 +7860,8 @@ static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, S return TSDB_CODE_SUCCESS; } - static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTable, int32_t numOfProjs, - const char* const pProjCol[], SSelectStmt** pStmt) { + const char* const pProjCol[], SSelectStmt** pStmt) { SNodeList* pProjectionList = NULL; if (numOfProjs >= 0) { pProjectionList = createProjectCols(numOfProjs, pProjCol); @@ -7862,13 +7873,15 @@ static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTabl return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt); } -static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { +static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, + 
SSelectStmt** pStmt) { return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt); } static int32_t createSelectStmtForShow(ENodeType showType, SSelectStmt** pStmt) { const SSysTableShowAdapter* pShow = &sysTableShowAdapter[showType - SYSTABLE_SHOW_TYPE_OFFSET]; - return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, pStmt); + return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, + pStmt); } static int32_t createSelectStmtForShowTableDist(SShowTableDistributedStmt* pStmt, SSelectStmt** pOutput) { @@ -8006,8 +8019,8 @@ static int32_t createShowTableTagsProjections(SNodeList** pProjections, SNodeLis static int32_t rewriteShowStableTags(STranslateContext* pCxt, SQuery* pQuery) { SShowTableTagsStmt* pShow = (SShowTableTagsStmt*)pQuery->pRoot; SSelectStmt* pSelect = NULL; - int32_t code = createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal, - -1, NULL, &pSelect); + int32_t code = createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, + ((SValueNode*)pShow->pTbName)->literal, -1, NULL, &pSelect); if (TSDB_CODE_SUCCESS == code) { code = createShowTableTagsProjections(&pSelect->pProjectionList, &pShow->pTags); } @@ -9170,7 +9183,7 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) { case QUERY_NODE_SHOW_USERS_STMT: case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: +// case QUERY_NODE_SHOW_MODULES_STMT: case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_FUNCTIONS_STMT: case QUERY_NODE_SHOW_INDEXES_STMT: diff --git a/source/libs/planner/inc/planInt.h b/source/libs/planner/inc/planInt.h index 092fe17411..24d77cb9a4 100644 --- a/source/libs/planner/inc/planInt.h +++ b/source/libs/planner/inc/planInt.h @@ -43,8 +43,14 @@ int32_t splitLogicPlan(SPlanContext* pCxt, SLogicSubplan* pLogicSubplan); int32_t scaleOutLogicPlan(SPlanContext* pCxt, SLogicSubplan* pLogicSubplan, SQueryLogicPlan** pLogicPlan); int32_t createPhysiPlan(SPlanContext* pCxt, SQueryLogicPlan* pLogicPlan, SQueryPlan** pPlan, SArray* pExecNodeList); -bool isPartTableAgg(SAggLogicNode* pAgg); -bool isPartTableWinodw(SWindowLogicNode* pWindow); +bool isPartTableAgg(SAggLogicNode* pAgg); +bool isPartTagAgg(SAggLogicNode* pAgg); +bool isPartTableWinodw(SWindowLogicNode* pWindow); + +#define CLONE_LIMIT 1 +#define CLONE_SLIMIT (1 << 1) +#define CLONE_LIMIT_SLIMIT (CLONE_LIMIT | CLONE_SLIMIT) +bool cloneLimit(SLogicNode* pParent, SLogicNode* pChild, uint8_t cloneWhat); #ifdef __cplusplus }
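cloneLimit's cloneWhat parameter is a bit mask, so a caller can push the limit, the slimit, or both down to a child node in one call; the boolean return reports whether anything was actually cloned. A hedged usage sketch (the surrounding caller is hypothetical; cloneLimit itself is defined in the planUtil.c hunk further down):

  // Push only the slimit into a partial aggregate and keep the limit on the parent.
  if (cloneLimit(pParent, pChild, CLONE_SLIMIT)) {
    // The child's cloned slimit has the offset folded in (limit += offset, offset = 0),
    // so the child may stop early without double-applying the offset.
  }

  // Or push both at once:
  cloneLimit(pParent, pChild, CLONE_LIMIT_SLIMIT);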
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 16440be511..69a7b0cf87 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -368,8 +368,8 @@ static void scanPathOptSetGroupOrderScan(SScanLogicNode* pScan) { if (pScan->node.pParent && nodeType(pScan->node.pParent) == QUERY_NODE_LOGIC_PLAN_AGG) { SAggLogicNode* pAgg = (SAggLogicNode*)pScan->node.pParent; - bool withSlimit = pAgg->node.pSlimit != NULL || (pAgg->node.pParent && pAgg->node.pParent->pSlimit); - if (withSlimit && isPartTableAgg(pAgg)) { + bool withSlimit = pAgg->node.pSlimit != NULL; + if (withSlimit && (isPartTableAgg(pAgg) || isPartTagAgg(pAgg))) { pScan->groupOrderScan = pAgg->node.forceCreateNonBlockingOptr = true; } } @@ -2679,6 +2679,9 @@ static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubp } nodesDestroyNode((SNode*)pAgg); tagScanOptCloneAncestorSlimit((SLogicNode*)pScanNode); + + pScanNode->onlyMetaCtbIdx = false; + pCxt->optimized = true; return TSDB_CODE_SUCCESS; } @@ -2698,39 +2701,31 @@ static void swapLimit(SLogicNode* pParent, SLogicNode* pChild) { pParent->pLimit = NULL; } -static void cloneLimit(SLogicNode* pParent, SLogicNode* pChild) { - SLimitNode* pLimit = NULL; - if (pParent->pLimit) { - pChild->pLimit = nodesCloneNode(pParent->pLimit); - pLimit = (SLimitNode*)pChild->pLimit; - pLimit->limit += pLimit->offset; - pLimit->offset = 0; - } - - if (pParent->pSlimit) { - pChild->pSlimit = nodesCloneNode(pParent->pSlimit); - pLimit = (SLimitNode*)pChild->pSlimit; - pLimit->limit += pLimit->offset; - pLimit->offset = 0; - } -} - static bool pushDownLimitHow(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPushTo); static bool pushDownLimitTo(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPushTo) { switch (nodeType(pNodeLimitPushTo)) { case QUERY_NODE_LOGIC_PLAN_WINDOW: { SWindowLogicNode* pWindow = (SWindowLogicNode*)pNodeLimitPushTo; if (pWindow->winType != WINDOW_TYPE_INTERVAL) break; - cloneLimit(pNodeWithLimit, pNodeLimitPushTo); + cloneLimit(pNodeWithLimit, pNodeLimitPushTo, CLONE_LIMIT_SLIMIT); return true; } case QUERY_NODE_LOGIC_PLAN_FILL: case QUERY_NODE_LOGIC_PLAN_SORT: { - cloneLimit(pNodeWithLimit, pNodeLimitPushTo); + cloneLimit(pNodeWithLimit, pNodeLimitPushTo, CLONE_LIMIT_SLIMIT); SNode* pChild = NULL; FOREACH(pChild, pNodeLimitPushTo->pChildren) { pushDownLimitHow(pNodeLimitPushTo, (SLogicNode*)pChild); } return true; } + case QUERY_NODE_LOGIC_PLAN_AGG: { + if (nodeType(pNodeWithLimit) == QUERY_NODE_LOGIC_PLAN_PROJECT && + (isPartTagAgg((SAggLogicNode*)pNodeLimitPushTo) || isPartTableAgg((SAggLogicNode*)pNodeLimitPushTo))) { + // when partitioning by tag, the slimit is cloned to the agg, and the agg runs pipelined.
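+ // (cloneLimit folds the offset into the cloned slimit: limit += offset and offset = 0,
+ // so the pipelined agg can stop after emitting enough groups without re-applying the offset.)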
+ // The scan below will do scanning with group order + return cloneLimit(pNodeWithLimit, pNodeLimitPushTo, CLONE_SLIMIT); + } + break; + } case QUERY_NODE_LOGIC_PLAN_SCAN: if (nodeType(pNodeWithLimit) == QUERY_NODE_LOGIC_PLAN_PROJECT && pNodeWithLimit->pLimit) { swapLimit(pNodeWithLimit, pNodeLimitPushTo); diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 06859e195d..c180be173a 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -511,6 +511,20 @@ static int32_t createSimpleScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSub return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, pScan, pPhyNode); } +static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, + SPhysiNode** pPhyNode) { + STagScanPhysiNode* pScan = + (STagScanPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pScanLogicNode, QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN); + if (NULL == pScan) { + return TSDB_CODE_OUT_OF_MEMORY; + } + vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); + + pScan->onlyMetaCtbIdx = pScanLogicNode->onlyMetaCtbIdx; + + return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); +} + static int32_t createLastRowScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, SPhysiNode** pPhyNode) { SLastRowScanPhysiNode* pScan = @@ -646,6 +660,7 @@ static int32_t createScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, pCxt->hasScan = true; switch (pScanLogicNode->scanType) { case SCAN_TYPE_TAG: + return createTagScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode); case SCAN_TYPE_BLOCK_INFO: return createSimpleScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode); case SCAN_TYPE_TABLE_COUNT: @@ -872,12 +887,16 @@ static int32_t rewritePrecalcExpr(SPhysiPlanContext* pCxt, SNode* pNode, SNodeLi } static int32_t createAggPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SAggLogicNode* pAggLogicNode, - SPhysiNode** pPhyNode) { + SPhysiNode** pPhyNode, SSubplan* pSubPlan) { SAggPhysiNode* pAgg = (SAggPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pAggLogicNode, QUERY_NODE_PHYSICAL_PLAN_HASH_AGG); if (NULL == pAgg) { return TSDB_CODE_OUT_OF_MEMORY; } + if (pAgg->node.pSlimit) { + pSubPlan->dynamicRowThreshold = true; + pSubPlan->rowsThreshold = ((SLimitNode*)pAgg->node.pSlimit)->limit; + } pAgg->mergeDataBlock = (GROUP_ACTION_KEEP == pAggLogicNode->node.groupAction ? 
false : true); pAgg->groupKeyOptimized = pAggLogicNode->hasGroupKeyOptimized; @@ -1617,7 +1636,7 @@ static int32_t doCreatePhysiNode(SPhysiPlanContext* pCxt, SLogicNode* pLogicNode case QUERY_NODE_LOGIC_PLAN_JOIN: return createJoinPhysiNode(pCxt, pChildren, (SJoinLogicNode*)pLogicNode, pPhyNode); case QUERY_NODE_LOGIC_PLAN_AGG: - return createAggPhysiNode(pCxt, pChildren, (SAggLogicNode*)pLogicNode, pPhyNode); + return createAggPhysiNode(pCxt, pChildren, (SAggLogicNode*)pLogicNode, pPhyNode, pSubplan); case QUERY_NODE_LOGIC_PLAN_PROJECT: return createProjectPhysiNode(pCxt, pChildren, (SProjectLogicNode*)pLogicNode, pPhyNode); case QUERY_NODE_LOGIC_PLAN_EXCHANGE: @@ -1721,6 +1740,8 @@ static SSubplan* makeSubplan(SPhysiPlanContext* pCxt, SLogicSubplan* pLogicSubpl pSubplan->id = pLogicSubplan->id; pSubplan->subplanType = pLogicSubplan->subplanType; pSubplan->level = pLogicSubplan->level; + pSubplan->rowsThreshold = 4096; + pSubplan->dynamicRowThreshold = false; if (NULL != pCxt->pPlanCxt->pUser) { snprintf(pSubplan->user, sizeof(pSubplan->user), "%s", pCxt->pPlanCxt->pUser); } diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 3f6c73b4e5..868aee7147 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -867,8 +867,16 @@ static int32_t stbSplSplitAggNodeForPartTable(SSplitContext* pCxt, SStableSplitI static int32_t stbSplSplitAggNodeForCrossTable(SSplitContext* pCxt, SStableSplitInfo* pInfo) { SLogicNode* pPartAgg = NULL; int32_t code = stbSplCreatePartAggNode((SAggLogicNode*)pInfo->pSplitNode, &pPartAgg); + + if (TSDB_CODE_SUCCESS == code) { - code = stbSplCreateExchangeNode(pCxt, pInfo->pSplitNode, pPartAgg); + // if slimit was pushed down to agg, agg will be pipelined mode, add sort merge before parent agg + if ((SAggLogicNode*)pInfo->pSplitNode->pSlimit) + code = stbSplCreateMergeNode(pCxt, NULL, pInfo->pSplitNode, NULL, pPartAgg, true); + else + code = stbSplCreateExchangeNode(pCxt, pInfo->pSplitNode, pPartAgg); + } else { + nodesDestroyNode((SNode*)pPartAgg); } if (TSDB_CODE_SUCCESS == code) { code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren, diff --git a/source/libs/planner/src/planUtil.c b/source/libs/planner/src/planUtil.c index 88086cde1d..9febe102f6 100644 --- a/source/libs/planner/src/planUtil.c +++ b/source/libs/planner/src/planUtil.c @@ -349,7 +349,7 @@ static bool stbHasPartTbname(SNodeList* pPartKeys) { return false; } -static SNodeList* stbSplGetPartKeys(SLogicNode* pNode) { +static SNodeList* stbGetPartKeys(SLogicNode* pNode) { if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode)) { return ((SScanLogicNode*)pNode)->pGroupTags; } else if (QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode)) { @@ -367,11 +367,58 @@ bool isPartTableAgg(SAggLogicNode* pAgg) { return stbHasPartTbname(pAgg->pGroupKeys) && stbNotSystemScan((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0)); } - return stbHasPartTbname(stbSplGetPartKeys((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0))); + return stbHasPartTbname(stbGetPartKeys((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0))); +} + +static bool stbHasPartTag(SNodeList* pPartKeys) { + if (NULL == pPartKeys) { + return false; + } + SNode* pPartKey = NULL; + FOREACH(pPartKey, pPartKeys) { + if (QUERY_NODE_GROUPING_SET == nodeType(pPartKey)) { + pPartKey = nodesListGetNode(((SGroupingSetNode*)pPartKey)->pParameterList, 0); + } + if ((QUERY_NODE_FUNCTION == nodeType(pPartKey) && FUNCTION_TYPE_TAGS == ((SFunctionNode*)pPartKey)->funcType) 
|| + (QUERY_NODE_COLUMN == nodeType(pPartKey) && COLUMN_TYPE_TAG == ((SColumnNode*)pPartKey)->colType)) { + return true; + } + } + return false; +} + +bool isPartTagAgg(SAggLogicNode* pAgg) { + if (1 != LIST_LENGTH(pAgg->node.pChildren)) { + return false; + } + if (pAgg->pGroupKeys) { + return stbHasPartTag(pAgg->pGroupKeys) && + stbNotSystemScan((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0)); + } + return stbHasPartTag(stbGetPartKeys((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0))); } bool isPartTableWinodw(SWindowLogicNode* pWindow) { - return stbHasPartTbname(stbSplGetPartKeys((SLogicNode*)nodesListGetNode(pWindow->node.pChildren, 0))); + return stbHasPartTbname(stbGetPartKeys((SLogicNode*)nodesListGetNode(pWindow->node.pChildren, 0))); } +bool cloneLimit(SLogicNode* pParent, SLogicNode* pChild, uint8_t cloneWhat) { + SLimitNode* pLimit; + bool cloned = false; + if (pParent->pLimit && (cloneWhat & CLONE_LIMIT)) { + pChild->pLimit = nodesCloneNode(pParent->pLimit); + pLimit = (SLimitNode*)pChild->pLimit; + pLimit->limit += pLimit->offset; + pLimit->offset = 0; + cloned = true; + } + if (pParent->pSlimit && (cloneWhat & CLONE_SLIMIT)) { + pChild->pSlimit = nodesCloneNode(pParent->pSlimit); + pLimit = (SLimitNode*)pChild->pSlimit; + pLimit->limit += pLimit->offset; + pLimit->offset = 0; + cloned = true; + } + return cloned; +} diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h index 32d6dc65d9..7a557a744a 100644 --- a/source/libs/stream/inc/streamInt.h +++ b/source/libs/stream/inc/streamInt.h @@ -52,7 +52,6 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock) int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq); -int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData); int32_t streamDispatchCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pReq, int32_t nodeId, SEpSet* pEpSet); int32_t streamDoDispatchScanHistoryFinishMsg(SStreamTask* pTask, const SStreamScanHistoryFinishReq* pReq, int32_t vgId, @@ -63,6 +62,7 @@ SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* int32_t streamAddEndScanHistoryMsg(SStreamTask* pTask, SRpcHandleInfo* pRpcInfo, SStreamScanHistoryFinishReq* pReq); int32_t streamNotifyUpstreamContinue(SStreamTask* pTask); int32_t streamTaskFillHistoryFinished(SStreamTask* pTask); +int32_t streamTransferStateToStreamTask(SStreamTask* pTask); extern int32_t streamBackendId; extern int32_t streamBackendCfWrapperId; diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index f85ade591c..af67490888 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -142,40 +142,6 @@ int32_t streamSchedExec(SStreamTask* pTask) { return 0; } -int32_t streamTaskEnqueueBlocks(SStreamTask* pTask, const SStreamDispatchReq* pReq, SRpcMsg* pRsp) { - int8_t status = 0; - - SStreamDataBlock* pBlock = createStreamDataFromDispatchMsg(pReq, STREAM_INPUT__DATA_BLOCK, pReq->dataSrcVgId); - if (pBlock == NULL) { - streamTaskInputFail(pTask); - status = TASK_INPUT_STATUS__FAILED; - qError("vgId:%d, s-task:%s failed to receive dispatch msg, reason: out of memory", pTask->pMeta->vgId, - pTask->id.idStr); - } else { - int32_t code = tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pBlock); - // input queue is full, upstream is blocked now - status = (code == TSDB_CODE_SUCCESS)? 
TASK_INPUT_STATUS__NORMAL:TASK_INPUT_STATUS__BLOCKED; - } - - // rsp by input status - void* buf = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamDispatchRsp)); - ((SMsgHead*)buf)->vgId = htonl(pReq->upstreamNodeId); - SStreamDispatchRsp* pDispatchRsp = POINTER_SHIFT(buf, sizeof(SMsgHead)); - - pDispatchRsp->inputStatus = status; - pDispatchRsp->streamId = htobe64(pReq->streamId); - pDispatchRsp->upstreamNodeId = htonl(pReq->upstreamNodeId); - pDispatchRsp->upstreamTaskId = htonl(pReq->upstreamTaskId); - pDispatchRsp->downstreamNodeId = htonl(pTask->info.nodeId); - pDispatchRsp->downstreamTaskId = htonl(pTask->id.taskId); - - pRsp->pCont = buf; - pRsp->contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp); - tmsgSendRsp(pRsp); - - return status == TASK_INPUT_STATUS__NORMAL ? 0 : -1; -} - int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pRsp) { SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0); int8_t status = TASK_INPUT_STATUS__NORMAL; @@ -235,90 +201,115 @@ int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock return 0; } + + +static int32_t streamTaskAppendInputBlocks(SStreamTask* pTask, const SStreamDispatchReq* pReq) { + int8_t status = 0; + + SStreamDataBlock* pBlock = createStreamDataFromDispatchMsg(pReq, pReq->type, pReq->srcVgId); + if (pBlock == NULL) { + streamTaskInputFail(pTask); + status = TASK_INPUT_STATUS__FAILED; + qError("vgId:%d, s-task:%s failed to receive dispatch msg, reason: out of memory", pTask->pMeta->vgId, + pTask->id.idStr); + } else { + if (pBlock->type == STREAM_INPUT__TRANS_STATE) { + pTask->status.appendTranstateBlock = true; + } + + int32_t code = tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pBlock); + // input queue is full, upstream is blocked now + status = (code == TSDB_CODE_SUCCESS) ? 
TASK_INPUT_STATUS__NORMAL : TASK_INPUT_STATUS__BLOCKED; + } + + return status; +} + +static int32_t buildDispatchRsp(const SStreamTask* pTask, const SStreamDispatchReq* pReq, int32_t status, void** pBuf) { + *pBuf = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamDispatchRsp)); + if (*pBuf == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + ((SMsgHead*)(*pBuf))->vgId = htonl(pReq->upstreamNodeId); + SStreamDispatchRsp* pDispatchRsp = POINTER_SHIFT((*pBuf), sizeof(SMsgHead)); + + pDispatchRsp->inputStatus = status; + pDispatchRsp->streamId = htobe64(pReq->streamId); + pDispatchRsp->upstreamNodeId = htonl(pReq->upstreamNodeId); + pDispatchRsp->upstreamTaskId = htonl(pReq->upstreamTaskId); + pDispatchRsp->downstreamNodeId = htonl(pTask->info.nodeId); + pDispatchRsp->downstreamTaskId = htonl(pTask->id.taskId); + + return TSDB_CODE_SUCCESS; +} + +void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId) { + SStreamChildEpInfo* pInfo = streamTaskGetUpstreamTaskEpInfo(pTask, taskId); + if (pInfo != NULL) { + pInfo->dataAllowed = false; + } +} + int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp, bool exec) { qDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64, pTask->id.idStr, pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->totalLen); - // todo add the input queue buffer limitation - streamTaskEnqueueBlocks(pTask, pReq, pRsp); - tDeleteStreamDispatchReq(pReq); + int32_t status = 0; - if (exec) { - if (streamTryExec(pTask) < 0) { - return -1; - } + SStreamChildEpInfo* pInfo = streamTaskGetUpstreamTaskEpInfo(pTask, pReq->upstreamTaskId); + ASSERT(pInfo != NULL); + + if (!pInfo->dataAllowed) { + qWarn("s-task:%s data from task:0x%x is denied, since inputQ is closed for it", pTask->id.idStr, pReq->upstreamTaskId); + status = TASK_INPUT_STATUS__BLOCKED; } else { - streamSchedExec(pTask); + // Current task has received the checkpoint req from the upstream task, from which the message should all be blocked + if (pReq->type == STREAM_INPUT__CHECKPOINT_TRIGGER) { + streamTaskCloseUpstreamInput(pTask, pReq->upstreamTaskId); + qDebug("s-task:%s close inputQ for upstream:0x%x", pTask->id.idStr, pReq->upstreamTaskId); + } + + status = streamTaskAppendInputBlocks(pTask, pReq); } - return 0; -} - -// todo record the idle time for dispatch data -int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code) { - if (code != TSDB_CODE_SUCCESS) { - // dispatch message failed: network error, or node not available. - // in case of the input queue is full, the code will be TSDB_CODE_SUCCESS, the and pRsp>inputStatus will be set - // flag. here we need to retry dispatch this message to downstream task immediately. handle the case the failure - // happened too fast. 
todo handle the shuffle dispatch failure - if (code == TSDB_CODE_STREAM_TASK_NOT_EXIST) { - qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, no-retry", pTask->id.idStr, - pRsp->downstreamTaskId, tstrerror(code)); + { + // do send response with the input status + int32_t code = buildDispatchRsp(pTask, pReq, status, &pRsp->pCont); + if (code != TSDB_CODE_SUCCESS) { + // todo handle failure return code; - } else { - qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, retry cnt:%d", pTask->id.idStr, - pRsp->downstreamTaskId, tstrerror(code), ++pTask->msgInfo.retryCount); - return streamDispatchAllBlocks(pTask, pTask->msgInfo.pData); - } - } - - qDebug("s-task:%s receive dispatch rsp, output status:%d code:%d", pTask->id.idStr, pRsp->inputStatus, code); - - // there are other dispatch message not response yet - if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { - int32_t leftRsp = atomic_sub_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1); - qDebug("s-task:%s is shuffle, left waiting rsp %d", pTask->id.idStr, leftRsp); - if (leftRsp > 0) { - return 0; - } - } - - pTask->msgInfo.retryCount = 0; - ASSERT(pTask->outputInfo.status == TASK_OUTPUT_STATUS__WAIT); - - qDebug("s-task:%s output status is set to:%d", pTask->id.idStr, pTask->outputInfo.status); - - // the input queue of the (down stream) task that receive the output data is full, - // so the TASK_INPUT_STATUS_BLOCKED is rsp - // todo blocking the output status - if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) { - pTask->msgInfo.blockingTs = taosGetTimestampMs(); // record the blocking start time - - int32_t waitDuration = 300; // 300 ms - qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 "wait for %dms and retry dispatch data", - pTask->id.idStr, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, waitDuration); - streamRetryDispatchStreamBlock(pTask, waitDuration); - } else { // pipeline send data in output queue - // this message has been sent successfully, let's try next one. 
- destroyStreamDataBlock(pTask->msgInfo.pData); - pTask->msgInfo.pData = NULL; - - if (pTask->msgInfo.blockingTs != 0) { - int64_t el = taosGetTimestampMs() - pTask->msgInfo.blockingTs; - qDebug("s-task:%s resume to normal from inputQ blocking, idle time:%"PRId64"ms", pTask->id.idStr, el); - pTask->msgInfo.blockingTs = 0; } - // now ready for next data output - atomic_store_8(&pTask->outputInfo.status, TASK_OUTPUT_STATUS__NORMAL); - - // otherwise, continue dispatch the first block to down stream task in pipeline - streamDispatchStreamBlock(pTask); + pRsp->contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp); + tmsgSendRsp(pRsp); } + tDeleteStreamDispatchReq(pReq); + streamSchedExec(pTask); + return 0; } +//int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp, bool exec) { +// qDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64, pTask->id.idStr, +// pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->totalLen); +// +// // todo add the input queue buffer limitation +// streamTaskEnqueueBlocks(pTask, pReq, pRsp); +// tDeleteStreamDispatchReq(pReq); +// +// if (exec) { +// if (streamTryExec(pTask) < 0) { +// return -1; +// } +// } else { +// streamSchedExec(pTask); +// } +// +// return 0; +//} + int32_t streamProcessRunReq(SStreamTask* pTask) { if (streamTryExec(pTask) < 0) { return -1; @@ -385,12 +376,15 @@ int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem) { destroyStreamDataBlock((SStreamDataBlock*) pItem); return code; } - } else if (type == STREAM_INPUT__CHECKPOINT) { + } else if (type == STREAM_INPUT__CHECKPOINT || type == STREAM_INPUT__TRANS_STATE) { taosWriteQitem(pTask->inputQueue->queue, pItem); + qDebug("s-task:%s checkpoint/trans-state blockdata enqueue, total in queue:%d, size:%.2fMiB", pTask->id.idStr, total, size); } else if (type == STREAM_INPUT__GET_RES) { // use the default memory limit, refactor later. 
taosWriteQitem(pTask->inputQueue->queue, pItem); qDebug("s-task:%s data res enqueue, current(blocks:%d, size:%.2fMiB)", pTask->id.idStr, total, size); + } else { + ASSERT(0); } if (type != STREAM_INPUT__GET_RES && type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) { @@ -433,4 +427,16 @@ SStreamChildEpInfo * streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t } return NULL; -} \ No newline at end of file +} + +void streamTaskOpenAllUpstreamInput(SStreamTask* pTask) { + int32_t num = taosArrayGetSize(pTask->pUpstreamEpInfoList); + if (num == 0) { + return; + } + + for(int32_t i = 0; i < num; ++i) { + SStreamChildEpInfo* pInfo = taosArrayGetP(pTask->pUpstreamEpInfoList, i); + pInfo->dataAllowed = true; + } +} diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index bb4b842787..fc1b788b77 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -204,7 +204,7 @@ void streamFreeQitem(SStreamQueueItem* data) { if (type == STREAM_INPUT__GET_RES) { blockDataDestroy(((SStreamTrigger*)data)->pBlock); taosFreeQitem(data); - } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE) { + } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE || type == STREAM_INPUT__TRANS_STATE) { taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)blockDataFreeRes); taosFreeQitem(data); } else if (type == STREAM_INPUT__DATA_SUBMIT) { diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 6771d0cc28..694b0808f2 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -25,6 +25,9 @@ typedef struct SBlockName { char parTbName[TSDB_TABLE_NAME_LEN]; } SBlockName; +static int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTask* pTask, int32_t vgId, + int32_t numOfBlocks, int64_t dstTaskId, int32_t type); + static void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen) { pMsg->msgType = msgType; pMsg->pCont = pCont; @@ -35,8 +38,9 @@ static int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatc if (tStartEncode(pEncoder) < 0) return -1; if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->type) < 0) return -1; if (tEncodeI32(pEncoder, pReq->upstreamTaskId) < 0) return -1; - if (tEncodeI32(pEncoder, pReq->dataSrcVgId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->srcVgId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->upstreamChildId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->upstreamNodeId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->blockNum) < 0) return -1; @@ -88,8 +92,9 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) { if (tStartDecode(pDecoder) < 0) return -1; if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->type) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->upstreamTaskId) < 0) return -1; - if (tDecodeI32(pDecoder, &pReq->dataSrcVgId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->srcVgId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->upstreamChildId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->upstreamNodeId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->blockNum) < 0) return -1; @@ -113,14 +118,15 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* 
pReq) { } int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTask* pTask, int32_t vgId, int32_t numOfBlocks, - int64_t dstTaskId) { + int64_t dstTaskId, int32_t type) { pReq->streamId = pTask->id.streamId; - pReq->dataSrcVgId = vgId; + pReq->srcVgId = vgId; pReq->upstreamTaskId = pTask->id.taskId; pReq->upstreamChildId = pTask->info.selfChildId; pReq->upstreamNodeId = pTask->info.nodeId; pReq->blockNum = numOfBlocks; pReq->taskId = dstTaskId; + pReq->type = type; pReq->data = taosArrayInit(numOfBlocks, POINTER_BYTES); pReq->dataLen = taosArrayInit(numOfBlocks, sizeof(int32_t)); @@ -358,7 +364,8 @@ static int32_t doSendDispatchMsg(SStreamTask* pTask, const SStreamDispatchReq* p msg.pCont = buf; msg.msgType = pTask->msgInfo.msgType; - qDebug("s-task:%s dispatch msg to taskId:0x%x vgId:%d data msg", pTask->id.idStr, pReq->taskId, vgId); + qDebug("s-task:%s dispatch msg to taskId:0x%x vgId:%d data msg, len:%d", pTask->id.idStr, pReq->taskId, vgId, + msg.contLen); return tmsgSendReq(pEpSet, &msg); FAIL: @@ -436,9 +443,8 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S return 0; } -int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) { +static int32_t doDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) { int32_t code = 0; - int32_t numOfBlocks = taosArrayGetSize(pData->blocks); ASSERT(numOfBlocks != 0); @@ -446,15 +452,15 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat SStreamDispatchReq req = {0}; int32_t downstreamTaskId = pTask->fixedEpDispatcher.taskId; - code = tInitStreamDispatchReq(&req, pTask, pData->srcVgId, numOfBlocks, downstreamTaskId); + code = tInitStreamDispatchReq(&req, pTask, pData->srcVgId, numOfBlocks, downstreamTaskId, pData->type); if (code != TSDB_CODE_SUCCESS) { return code; } for (int32_t i = 0; i < numOfBlocks; i++) { SSDataBlock* pDataBlock = taosArrayGet(pData->blocks, i); - code = streamAddBlockIntoDispatchMsg(pDataBlock, &req); + code = streamAddBlockIntoDispatchMsg(pDataBlock, &req); if (code != TSDB_CODE_SUCCESS) { taosArrayDestroyP(req.data, taosMemoryFree); taosArrayDestroy(req.dataLen); @@ -487,7 +493,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat for (int32_t i = 0; i < vgSz; i++) { SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i); - code = tInitStreamDispatchReq(&pReqs[i], pTask, pData->srcVgId, 0, pVgInfo->taskId); + code = tInitStreamDispatchReq(&pReqs[i], pTask, pData->srcVgId, 0, pVgInfo->taskId, pData->type); if (code != TSDB_CODE_SUCCESS) { goto FAIL_SHUFFLE_DISPATCH; } @@ -497,8 +503,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat SSDataBlock* pDataBlock = taosArrayGet(pData->blocks, i); // TODO: do not use broadcast - if (pDataBlock->info.type == STREAM_DELETE_RESULT) { - + if (pDataBlock->info.type == STREAM_DELETE_RESULT || pDataBlock->info.type == STREAM_CHECKPOINT || pDataBlock->info.type == STREAM_TRANS_STATE) { for (int32_t j = 0; j < vgSz; j++) { if (streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j]) < 0) { goto FAIL_SHUFFLE_DISPATCH; @@ -518,14 +523,14 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat } } - qDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to %d vgroups", pTask->id.idStr, pTask->info.selfChildId, - numOfBlocks, vgSz); + qDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to %d vgroups", pTask->id.idStr, + pTask->info.selfChildId, numOfBlocks, vgSz); for 
(int32_t i = 0; i < vgSz; i++) { if (pReqs[i].blockNum > 0) { SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i); - qDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to vgId:%d", pTask->id.idStr, pTask->info.selfChildId, - pReqs[i].blockNum, pVgInfo->vgId); + qDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to vgId:%d", pTask->id.idStr, + pTask->info.selfChildId, pReqs[i].blockNum, pVgInfo->vgId); code = doSendDispatchMsg(pTask, &pReqs[i], pVgInfo->vgId, &pVgInfo->epSet); if (code < 0) { @@ -536,7 +541,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat code = 0; - FAIL_SHUFFLE_DISPATCH: + FAIL_SHUFFLE_DISPATCH: for (int32_t i = 0; i < vgSz; i++) { taosArrayDestroyP(pReqs[i].data, taosMemoryFree); taosArrayDestroy(pReqs[i].dataLen); @@ -559,7 +564,7 @@ static void doRetryDispatchData(void* param, void* tmrId) { ASSERT(pTask->outputInfo.status == TASK_OUTPUT_STATUS__WAIT); - int32_t code = streamDispatchAllBlocks(pTask, pTask->msgInfo.pData); + int32_t code = doDispatchAllBlocks(pTask, pTask->msgInfo.pData); if (code != TSDB_CODE_SUCCESS) { if (!streamTaskShouldStop(&pTask->status)) { qDebug("s-task:%s reset the waitRspCnt to be 0 before launch retry dispatch", pTask->id.idStr); @@ -607,12 +612,13 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) { } pTask->msgInfo.pData = pBlock; - ASSERT(pBlock->type == STREAM_INPUT__DATA_BLOCK); + ASSERT(pBlock->type == STREAM_INPUT__DATA_BLOCK || pBlock->type == STREAM_INPUT__CHECKPOINT_TRIGGER || + pBlock->type == STREAM_INPUT__TRANS_STATE); int32_t retryCount = 0; while (1) { - int32_t code = streamDispatchAllBlocks(pTask, pBlock); + int32_t code = doDispatchAllBlocks(pTask, pBlock); if (code == TSDB_CODE_SUCCESS) { break; } @@ -729,3 +735,88 @@ int32_t streamNotifyUpstreamContinue(SStreamTask* pTask) { num); return 0; } + +int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code) { + const char* id = pTask->id.idStr; + + if (code != TSDB_CODE_SUCCESS) { + // dispatch message failed: network error, or node not available. + // in case of the input queue is full, the code will be TSDB_CODE_SUCCESS, the and pRsp>inputStatus will be set + // flag. here we need to retry dispatch this message to downstream task immediately. handle the case the failure + // happened too fast. + // todo handle the shuffle dispatch failure + if (code == TSDB_CODE_STREAM_TASK_NOT_EXIST) { // destination task does not exist, not retry anymore + qWarn("s-task:%s failed to dispatch msg to task:0x%x, no retry, since it is destroyed already", id, pRsp->downstreamTaskId); + } else { + qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, retry cnt:%d", id, pRsp->downstreamTaskId, + tstrerror(code), ++pTask->msgInfo.retryCount); + int32_t ret = doDispatchAllBlocks(pTask, pTask->msgInfo.pData); + if (ret != TSDB_CODE_SUCCESS) { + } + } + + return TSDB_CODE_SUCCESS; + } + + qDebug("s-task:%s recv dispatch rsp from 0x%x, downstream task input status:%d code:%d", id, pRsp->downstreamTaskId, + pRsp->inputStatus, code); + + // there are other dispatch message not response yet + if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { + int32_t leftRsp = atomic_sub_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1); + qDebug("s-task:%s is shuffle, left waiting rsp %d", id, leftRsp); + if (leftRsp > 0) { + return 0; + } + } + + // transtate msg has been sent to downstream successfully. 
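Reviewer note: in `streamProcessDispatchRsp` above, a shuffle dispatch sends N requests and decrements `waitingRspCnt` atomically as responses arrive; only the thread handling the final response continues. A minimal sketch of that countdown with C11 atomics (counts and the callback are illustrative):

```c
/* Sketch of the waitingRspCnt pattern: only the last response advances
 * the task's output state. Requires C11 <stdatomic.h>. */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int waitingRspCnt; } Dispatcher;

static void onDispatchRsp(Dispatcher *d) {
  int left = atomic_fetch_sub(&d->waitingRspCnt, 1) - 1;
  if (left > 0) {
    printf("still waiting for %d rsp(s)\n", left);
    return; /* other responses outstanding, nothing else to do */
  }
  printf("last rsp received, advance output state\n");
}

int main(void) {
  Dispatcher d;
  atomic_init(&d.waitingRspCnt, 3); /* pretend we dispatched to 3 vgroups */
  onDispatchRsp(&d);
  onDispatchRsp(&d);
  onDispatchRsp(&d);                /* only this call advances */
  return 0;
}
```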
let's transfer the fill-history task state + SStreamDataBlock* p = pTask->msgInfo.pData; + if (p->type == STREAM_INPUT__TRANS_STATE) { + qDebug("s-task:%s dispatch transtate msg to downstream successfully, start to transfer state", id); + ASSERT(pTask->info.fillHistory == 1); + code = streamTransferStateToStreamTask(pTask); + if (code != TSDB_CODE_SUCCESS) { // todo: do nothing if error happens + } + + streamFreeQitem(pTask->msgInfo.pData); + return TSDB_CODE_SUCCESS; + } + + pTask->msgInfo.retryCount = 0; + ASSERT(pTask->outputInfo.status == TASK_OUTPUT_STATUS__WAIT); + + qDebug("s-task:%s output status is set to:%d", id, pTask->outputInfo.status); + + // the input queue of the (down stream) task that receive the output data is full, + // so the TASK_INPUT_STATUS_BLOCKED is rsp + if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) { + pTask->inputStatus = TASK_INPUT_STATUS__BLOCKED; // block the input of current task, to push pressure to upstream + pTask->msgInfo.blockingTs = taosGetTimestampMs(); // record the blocking start time + qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 " wait for %dms and retry dispatch data", + id, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS); + streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS); + } else { // pipeline send data in output queue + // this message has been sent successfully, let's try next one. + destroyStreamDataBlock(pTask->msgInfo.pData); + pTask->msgInfo.pData = NULL; + + if (pTask->msgInfo.blockingTs != 0) { + int64_t el = taosGetTimestampMs() - pTask->msgInfo.blockingTs; + qDebug("s-task:%s downstream task:0x%x resume to normal from inputQ blocking, blocking time:%" PRId64 "ms", id, + pRsp->downstreamTaskId, el); + pTask->msgInfo.blockingTs = 0; + + // put data into inputQ of current task is also allowed + pTask->inputStatus = TASK_INPUT_STATUS__NORMAL; + } + + // now ready for next data output + atomic_store_8(&pTask->outputInfo.status, TASK_OUTPUT_STATUS__NORMAL); + + // otherwise, continue dispatch the first block to down stream task in pipeline + streamDispatchStreamBlock(pTask); + } + + return 0; +} diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index c7da80fdaf..02bbce6485 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -287,21 +287,32 @@ static void waitForTaskIdle(SStreamTask* pTask, SStreamTask* pStreamTask) { } } -static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { +int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { SStreamMeta* pMeta = pTask->pMeta; SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId); if (pStreamTask == NULL) { - // todo: destroy the fill-history task here - qError("s-task:%s failed to find related stream task:0x%x, it may have been destroyed or closed", pTask->id.idStr, - pTask->streamTaskId.taskId); + qError( + "s-task:%s failed to find related stream task:0x%x, it may have been destroyed or closed, destroy the related " + "fill-history task", + pTask->id.idStr, pTask->streamTaskId.taskId); + + // 1. free it and remove fill-history task from disk meta-store + streamMetaUnregisterTask(pMeta, pTask->id.streamId, pTask->id.taskId); + + // 2. 
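Reviewer note: the rsp handler above implements back-pressure: on `TASK_INPUT_STATUS__BLOCKED` it records `blockingTs`, blocks its own input, and arms a retry timer; on a normal response it clears the timestamp and logs how long the stall lasted. A minimal sketch of that pattern, with `nowMs` and the retry hook as stand-ins for `taosGetTimestampMs` and `streamRetryDispatchStreamBlock`:

```c
/* Sketch of the inputQ-blocked backoff: record when blocking started,
 * retry after a fixed interval, clear the mark on recovery. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define DISPATCH_RETRY_INTERVAL_MS 300

static int64_t nowMs(void) {
  struct timespec ts; clock_gettime(CLOCK_REALTIME, &ts);
  return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

typedef struct { int64_t blockingTs; int inputBlocked; } TaskState;

static void onRspBlocked(TaskState *t) {
  t->inputBlocked = 1;     /* push back-pressure to our own upstream */
  t->blockingTs = nowMs(); /* remember when the stall began */
  printf("downstream full, retry in %d ms\n", DISPATCH_RETRY_INTERVAL_MS);
  /* real code arms a timer here instead of retrying inline */
}

static void onRspNormal(TaskState *t) {
  if (t->blockingTs != 0) {
    printf("resumed after %lld ms of blocking\n", (long long)(nowMs() - t->blockingTs));
    t->blockingTs = 0;
    t->inputBlocked = 0;   /* accept input again */
  }
  /* real code frees the sent block and dispatches the next one */
}

int main(void) {
  TaskState t = {0};
  onRspBlocked(&t);
  onRspNormal(&t);
  return 0;
}
```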
save to disk + taosWLockLatch(&pMeta->lock); + if (streamMetaCommit(pMeta) < 0) { + // persist to disk + } + taosWUnLockLatch(&pMeta->lock); return TSDB_CODE_STREAM_TASK_NOT_EXIST; } else { qDebug("s-task:%s fill-history task end, update related stream task:%s info, transfer exec state", pTask->id.idStr, pStreamTask->id.idStr); } - ASSERT(pStreamTask->historyTaskId.taskId == pTask->id.taskId && pTask->status.transferState == true); + ASSERT(pStreamTask->historyTaskId.taskId == pTask->id.taskId && pTask->status.appendTranstateBlock == true); STimeWindow* pTimeWindow = &pStreamTask->dataRange.window; @@ -380,34 +391,52 @@ static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { return TSDB_CODE_SUCCESS; } -static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) { +int32_t streamTransferStateToStreamTask(SStreamTask* pTask) { int32_t code = TSDB_CODE_SUCCESS; - if (!pTask->status.transferState) { - return code; - } + ASSERT(pTask->status.appendTranstateBlock == 1); int32_t level = pTask->info.taskLevel; if (level == TASK_LEVEL__SOURCE) { streamTaskFillHistoryFinished(pTask); - streamTaskEndScanWAL(pTask); - } else if (level == TASK_LEVEL__AGG) { // do transfer task operator states. + } + + if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SOURCE) { // do transfer task operator states. code = streamDoTransferStateToStreamTask(pTask); - if (code != TSDB_CODE_SUCCESS) { // todo handle this - return code; - } } return code; } -static int32_t extractMsgFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks, - const char* id) { - int32_t retryTimes = 0; - int32_t MAX_RETRY_TIMES = 5; +static int32_t extractBlocksFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks) { + int32_t retryTimes = 0; + int32_t MAX_RETRY_TIMES = 5; + const char* id = pTask->id.idStr; + if (pTask->info.taskLevel == TASK_LEVEL__SINK) { // extract block from inputQ, one-by-one + while (1) { + if (streamTaskShouldPause(&pTask->status) || streamTaskShouldStop(&pTask->status)) { + qDebug("s-task:%s task should pause, extract input blocks:%d", pTask->id.idStr, *numOfBlocks); + return TSDB_CODE_SUCCESS; + } + + SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputQueue); + if (qItem == NULL) { + qDebug("===stream===break batchSize:%d, %s", *numOfBlocks, id); + return TSDB_CODE_SUCCESS; + } + + qDebug("s-task:%s sink task handle block one-by-one, type:%d", id, qItem->type); + + *numOfBlocks = 1; + *pInput = qItem; + return TSDB_CODE_SUCCESS; + } + } + + // non sink task while (1) { - if (streamTaskShouldPause(&pTask->status)) { - qDebug("s-task:%s task should pause, input blocks:%d", pTask->id.idStr, *numOfBlocks); + if (streamTaskShouldPause(&pTask->status) || streamTaskShouldStop(&pTask->status)) { + qDebug("s-task:%s task should pause, extract input blocks:%d", pTask->id.idStr, *numOfBlocks); return TSDB_CODE_SUCCESS; } @@ -415,51 +444,111 @@ static int32_t extractMsgFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInpu if (qItem == NULL) { if (pTask->info.taskLevel == TASK_LEVEL__SOURCE && (++retryTimes) < MAX_RETRY_TIMES) { taosMsleep(10); - qDebug("===stream===try again batchSize:%d, retry:%d", *numOfBlocks, retryTimes); + qDebug("===stream===try again batchSize:%d, retry:%d, %s", *numOfBlocks, retryTimes, id); continue; } - qDebug("===stream===break batchSize:%d", *numOfBlocks); + qDebug("===stream===break batchSize:%d, %s", *numOfBlocks, id); return TSDB_CODE_SUCCESS; } - // do not merge blocks for sink node - if (pTask->info.taskLevel == 
TASK_LEVEL__SINK) { - *numOfBlocks = 1; - *pInput = qItem; - return TSDB_CODE_SUCCESS; - } - - if (*pInput == NULL) { - ASSERT((*numOfBlocks) == 0); - *pInput = qItem; - } else { - // todo we need to sort the data block, instead of just appending into the array list. - void* newRet = streamMergeQueueItem(*pInput, qItem); - if (newRet == NULL) { - if (terrno == 0) { - qDebug("s-task:%s failed to merge blocks from inputQ, numOfBlocks:%d", id, *numOfBlocks); - } else { - qDebug("s-task:%s failed to merge blocks from inputQ, numOfBlocks:%d, code:%s", id, *numOfBlocks, - tstrerror(terrno)); - } + // do not merge blocks for sink node and check point data block + if (qItem->type == STREAM_INPUT__CHECKPOINT || qItem->type == STREAM_INPUT__CHECKPOINT_TRIGGER || + qItem->type == STREAM_INPUT__TRANS_STATE) { + if (*pInput == NULL) { + qDebug("s-task:%s checkpoint/transtate msg extracted, start to process immediately", id); + *numOfBlocks = 1; + *pInput = qItem; + return TSDB_CODE_SUCCESS; + } else { + // previous existed blocks needs to be handle, before handle the checkpoint msg block + qDebug("s-task:%s checkpoint/transtate msg extracted, handle previous blocks, numOfBlocks:%d", id, *numOfBlocks); streamQueueProcessFail(pTask->inputQueue); return TSDB_CODE_SUCCESS; } + } else { + if (*pInput == NULL) { + ASSERT((*numOfBlocks) == 0); + *pInput = qItem; + } else { + // todo we need to sort the data block, instead of just appending into the array list. + void* newRet = streamMergeQueueItem(*pInput, qItem); + if (newRet == NULL) { + qError("s-task:%s failed to merge blocks from inputQ, numOfBlocks:%d", id, *numOfBlocks); + streamQueueProcessFail(pTask->inputQueue); + return TSDB_CODE_SUCCESS; + } - *pInput = newRet; - } + *pInput = newRet; + } - *numOfBlocks += 1; - streamQueueProcessSuccess(pTask->inputQueue); + *numOfBlocks += 1; + streamQueueProcessSuccess(pTask->inputQueue); - if (*numOfBlocks >= MAX_STREAM_EXEC_BATCH_NUM) { - qDebug("s-task:%s batch size limit:%d reached, start to process blocks", id, MAX_STREAM_EXEC_BATCH_NUM); - return TSDB_CODE_SUCCESS; + if (*numOfBlocks >= MAX_STREAM_EXEC_BATCH_NUM) { + qDebug("s-task:%s batch size limit:%d reached, start to process blocks", id, MAX_STREAM_EXEC_BATCH_NUM); + return TSDB_CODE_SUCCESS; + } } } } +int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock) { + const char* id = pTask->id.idStr; + int32_t code = TSDB_CODE_SUCCESS; + + int32_t level = pTask->info.taskLevel; + if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SINK) { + int32_t remain = streamAlignTransferState(pTask); + if (remain > 0) { + streamFreeQitem((SStreamQueueItem*)pBlock); + qDebug("s-task:%s receive upstream transfer state msg, remain:%d", id, remain); + return 0; + } + } + + // dispatch the tran-state block to downstream task immediately + int32_t type = pTask->outputInfo.type; + + // transfer the ownership of executor state + if (type == TASK_OUTPUT__FIXED_DISPATCH || type == TASK_OUTPUT__SHUFFLE_DISPATCH) { + if (level == TASK_LEVEL__SOURCE) { + qDebug("s-task:%s add transfer-state block into outputQ", id); + } else { + qDebug("s-task:%s all upstream tasks send transfer-state block, add transfer-state block into outputQ", id); + ASSERT(pTask->streamTaskId.taskId != 0 && pTask->info.fillHistory == 1); + } + + // agg task should dispatch trans-state msg to sink task, to flush all data to sink task. 
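Reviewer note: the new `extractBlocksFromInputQ` above treats checkpoint and trans-state items as batch barriers: if the current batch is empty the control item is delivered alone; otherwise it is pushed back (`streamQueueProcessFail`) so the accumulated data batch is processed first. A sketch of that rule over a toy array queue (the real queue and item types are TDengine's):

```c
/* Sketch of the batch-barrier rule: control items never share a batch
 * with data items. "Process-fail" here means the item stays queued. */
#include <stdio.h>

typedef enum { IT_DATA, IT_CONTROL } ItemType;

#define MAX_BATCH 5

/* returns the number of items in this batch; *isControl is set when the
 * batch consists of a single control item */
static int extractBatch(const ItemType *q, int n, int *pos, int *isControl) {
  int batch = 0;
  *isControl = 0;
  while (*pos < n && batch < MAX_BATCH) {
    ItemType it = q[*pos];
    if (it == IT_CONTROL) {
      if (batch == 0) { (*pos)++; *isControl = 1; return 1; } /* deliver alone */
      return batch; /* leave the control item queued; flush data first */
    }
    (*pos)++; batch++; /* data items are merged into one batch */
  }
  return batch;
}

int main(void) {
  ItemType q[] = {IT_DATA, IT_DATA, IT_CONTROL, IT_DATA};
  int pos = 0, isCtrl, k;
  while ((k = extractBatch(q, 4, &pos, &isCtrl)) > 0)
    printf("batch of %d (%s)\n", k, isCtrl ? "control" : "data");
  return 0;
}
```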
+ if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SOURCE) { + pBlock->srcVgId = pTask->pMeta->vgId; + code = taosWriteQitem(pTask->outputInfo.queue->queue, pBlock); + if (code == 0) { + streamDispatchStreamBlock(pTask); + } else { + streamFreeQitem((SStreamQueueItem*)pBlock); + } + } else { // level == TASK_LEVEL__SINK + streamFreeQitem((SStreamQueueItem*)pBlock); + } + } else { // non-dispatch task, do task state transfer directly + streamFreeQitem((SStreamQueueItem*)pBlock); + if (level != TASK_LEVEL__SINK) { + qDebug("s-task:%s non-dispatch task, start to transfer state directly", id); + ASSERT(pTask->info.fillHistory == 1); + code = streamTransferStateToStreamTask(pTask); + + if (code != TSDB_CODE_SUCCESS) { + atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); + } + } else { + qDebug("s-task:%s sink task does not transfer state", id); + } + } + + return code; +} + /** * todo: the batch of blocks should be tuned dynamic, according to the total elapsed time of each batch of blocks, the * appropriate batch of blocks should be handled in 5 to 10 sec. @@ -478,12 +567,17 @@ int32_t streamExecForAll(SStreamTask* pTask) { // merge multiple input data if possible in the input queue. qDebug("s-task:%s start to extract data block from inputQ", id); - /*int32_t code = */extractMsgFromInputQ(pTask, &pInput, &batchSize, id); + /*int32_t code = */extractBlocksFromInputQ(pTask, &pInput, &batchSize); if (pInput == NULL) { ASSERT(batchSize == 0); break; } + if (pInput->type == STREAM_INPUT__TRANS_STATE) { + streamProcessTranstateBlock(pTask, (SStreamDataBlock*)pInput); + continue; + } + if (pTask->info.taskLevel == TASK_LEVEL__SINK) { ASSERT(pInput->type == STREAM_INPUT__DATA_BLOCK); qDebug("s-task:%s sink task start to sink %d blocks", id, batchSize); @@ -551,27 +645,6 @@ bool streamTaskIsIdle(const SStreamTask* pTask) { pTask->status.taskStatus == TASK_STATUS__DROPPING); } -int32_t streamTaskEndScanWAL(SStreamTask* pTask) { - const char* id = pTask->id.idStr; - double el = (taosGetTimestampMs() - pTask->tsInfo.step2Start) / 1000.0; - qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, el); - - // 1. notify all downstream tasks to transfer executor state after handle all history blocks. - int32_t code = streamDispatchTransferStateMsg(pTask); - if (code != TSDB_CODE_SUCCESS) { - // todo handle error - } - - // 2. do transfer stream task operator states. - pTask->status.transferState = true; - code = streamDoTransferStateToStreamTask(pTask); - if (code != TSDB_CODE_SUCCESS) { // todo handle error - return code; - } - - return TSDB_CODE_SUCCESS; -} - int32_t streamTryExec(SStreamTask* pTask) { // this function may be executed by multi-threads, so status check is required. 
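Reviewer note: `streamProcessTranstateBlock` above first calls `streamAlignTransferState` for AGG/SINK tasks, which by the look of the `remain` check behaves as a barrier over all upstream tasks: every upstream sends one trans-state block and only the final arrival proceeds. A sketch of that alignment, modeled as an atomic countdown (the counter initialization from the upstream list is assumed):

```c
/* Sketch of the alignment step: one trans-state block per upstream,
 * and only the last arrival triggers the actual state transfer. */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int remain; } AlignCtx;

/* returns how many upstreams are still outstanding after this arrival */
static int alignTransferState(AlignCtx *c) {
  return atomic_fetch_sub(&c->remain, 1) - 1;
}

static void onTranstateBlock(AlignCtx *c, int upstreamId) {
  int remain = alignTransferState(c);
  if (remain > 0) {
    printf("transtate from upstream %d, still waiting for %d\n", upstreamId, remain);
    return; /* drop this block; the barrier is not complete yet */
  }
  printf("all upstreams reported, forward trans-state downstream\n");
}

int main(void) {
  AlignCtx c; atomic_init(&c.remain, 2); /* two upstream tasks assumed */
  onTranstateBlock(&c, 0);
  onTranstateBlock(&c, 1);               /* completes the barrier */
  return 0;
}
```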
int8_t schedStatus = @@ -587,27 +660,13 @@ int32_t streamTryExec(SStreamTask* pTask) { } // todo the task should be commit here - if (taosQueueEmpty(pTask->inputQueue->queue)) { - // fill-history WAL scan has completed - if (pTask->status.transferState) { - code = streamTransferStateToStreamTask(pTask); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - streamSchedExec(pTask); - } else { - atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); - qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus), - pTask->status.schedStatus); - } - } else { - atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); - qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus), - pTask->status.schedStatus); + atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); + qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus), + pTask->status.schedStatus); - if ((!streamTaskShouldStop(&pTask->status)) && (!streamTaskShouldPause(&pTask->status))) { - streamSchedExec(pTask); - } + if (!(taosQueueEmpty(pTask->inputQueue->queue) || streamTaskShouldStop(&pTask->status) || + streamTaskShouldPause(&pTask->status))) { + streamSchedExec(pTask); } } else { qDebug("s-task:%s already started to exec by other thread, status:%s, sched-status:%d", id, diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 1e17ef7ef1..2506dbaead 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -372,68 +372,35 @@ int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask) { return 0; } -static int32_t doDispatchTransferMsg(SStreamTask* pTask, const SStreamTransferReq* pReq, int32_t vgId, SEpSet* pEpSet) { - void* buf = NULL; - int32_t code = -1; - SRpcMsg msg = {0}; - - int32_t tlen; - tEncodeSize(tEncodeStreamScanHistoryFinishReq, pReq, tlen, code); - if (code < 0) { - return -1; +int32_t appendTranstateIntoInputQ(SStreamTask* pTask) { + SStreamDataBlock* pTranstate = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, sizeof(SSDataBlock)); + if (pTranstate == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; } - buf = rpcMallocCont(sizeof(SMsgHead) + tlen); - if (buf == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; + SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); + if (pBlock == NULL) { + taosFreeQitem(pTranstate); + return TSDB_CODE_OUT_OF_MEMORY; } - ((SMsgHead*)buf)->vgId = htonl(vgId); - void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); + pTranstate->type = STREAM_INPUT__TRANS_STATE; - SEncoder encoder; - tEncoderInit(&encoder, abuf, tlen); - if ((code = tEncodeStreamScanHistoryFinishReq(&encoder, pReq)) < 0) { - if (buf) { - rpcFreeCont(buf); - } - return code; + pBlock->info.type = STREAM_TRANS_STATE; + pBlock->info.rows = 1; + pBlock->info.childId = pTask->info.selfChildId; + + pTranstate->blocks = taosArrayInit(4, sizeof(SSDataBlock));//pBlock; + taosArrayPush(pTranstate->blocks, pBlock); + + taosMemoryFree(pBlock); + if (tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pTranstate) < 0) { + taosFreeQitem(pTranstate); + return TSDB_CODE_OUT_OF_MEMORY; } - tEncoderClear(&encoder); - - msg.contLen = tlen + sizeof(SMsgHead); - msg.pCont = buf; - msg.msgType = TDMT_STREAM_TRANSFER_STATE; - msg.info.noResp = 1; - - tmsgSendReq(pEpSet, &msg); - qDebug("s-task:%s level:%d, status:%s 
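Reviewer note: the simplified `streamTryExec` above runs on multiple threads, so a compare-and-swap on `schedStatus` elects a single executor; after execution the status always drops back to INACTIVE, and the task is re-scheduled only if input remains and the task is neither stopping nor pausing. A minimal sketch of that gate (status values and the drain loop are illustrative):

```c
/* Sketch of the single-executor gate: CAS WAITING->ACTIVE lets exactly
 * one thread run; afterwards INACTIVE is restored and the task is
 * re-scheduled only when more work is queued. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { SCHED_INACTIVE = 0, SCHED_WAITING = 1, SCHED_ACTIVE = 2 };

typedef struct { atomic_char schedStatus; int queuedItems; bool stopping; } Task;

static void schedExec(Task *t) { atomic_store(&t->schedStatus, SCHED_WAITING); }

static void tryExec(Task *t) {
  char expect = SCHED_WAITING;
  if (!atomic_compare_exchange_strong(&t->schedStatus, &expect, SCHED_ACTIVE))
    return; /* another thread already owns this run */

  while (t->queuedItems > 0) t->queuedItems--; /* stand-in for streamExecForAll */

  atomic_store(&t->schedStatus, SCHED_INACTIVE);
  if (t->queuedItems > 0 && !t->stopping) schedExec(t); /* new work arrived mid-run */
}

int main(void) {
  Task t = {.queuedItems = 3};
  atomic_init(&t.schedStatus, SCHED_INACTIVE);
  schedExec(&t);
  tryExec(&t);
  printf("sched status:%d queued:%d\n", atomic_load(&t.schedStatus), t.queuedItems);
  return 0;
}
```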
dispatch transfer state msg to taskId:0x%x (vgId:%d)", pTask->id.idStr, - pTask->info.taskLevel, streamGetTaskStatusStr(pTask->status.taskStatus), pReq->downstreamTaskId, vgId); - - return 0; -} - -int32_t streamDispatchTransferStateMsg(SStreamTask* pTask) { - SStreamTransferReq req = { .streamId = pTask->id.streamId, .childId = pTask->info.selfChildId }; - - // serialize - if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) { - req.downstreamTaskId = pTask->fixedEpDispatcher.taskId; - doDispatchTransferMsg(pTask, &req, pTask->fixedEpDispatcher.nodeId, &pTask->fixedEpDispatcher.epSet); - } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { - SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos; - - int32_t numOfVgs = taosArrayGetSize(vgInfo); - for (int32_t i = 0; i < numOfVgs; i++) { - SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i); - req.downstreamTaskId = pVgInfo->taskId; - doDispatchTransferMsg(pTask, &req, pVgInfo->vgId, &pVgInfo->epSet); - } - } - - return 0; + pTask->status.appendTranstateBlock = true; + return TSDB_CODE_SUCCESS; } // agg diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c index bd15567c87..051106b99d 100644 --- a/source/libs/sync/src/syncRaftStore.c +++ b/source/libs/sync/src/syncRaftStore.c @@ -42,7 +42,7 @@ int32_t raftStoreReadFile(SSyncNode *pNode) { const char *file = pNode->raftStorePath; SRaftStore *pStore = &pNode->raftStore; - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { sInfo("vgId:%d, raft store file:%s not exist, use default value", pNode->vgId, file); pStore->currentTerm = 0; pStore->voteFor.addr = 0; diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index ae1c775a18..9acc17e130 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -21,6 +21,7 @@ #include "syncRaftCfg.h" #include "syncRaftStore.h" #include "syncSnapshot.h" +#include "tglobal.h" void syncCfg2SimpleStr(const SSyncCfg* pCfg, char* buf, int32_t bufLen) { int32_t len = snprintf(buf, bufLen, "{num:%d, as:%d, [", pCfg->replicaNum, pCfg->myIndex); @@ -41,7 +42,22 @@ void syncUtilNodeInfo2EpSet(const SNodeInfo* pInfo, SEpSet* pEpSet) { } bool syncUtilNodeInfo2RaftId(const SNodeInfo* pInfo, SyncGroupId vgId, SRaftId* raftId) { - uint32_t ipv4 = taosGetIpv4FromFqdn(pInfo->nodeFqdn); + uint32_t ipv4 = 0xFFFFFFFF; + sDebug("vgId:%d, start to resolve sync addr fqdn in %d seconds, " + "dnode:%d cluster:%" PRId64 " fqdn:%s port:%u ", + vgId, tsResolveFQDNRetryTime, + pInfo->nodeId, pInfo->clusterId, pInfo->nodeFqdn, pInfo->nodePort); + for(int i = 0; i < tsResolveFQDNRetryTime; i++){ + ipv4 = taosGetIpv4FromFqdn(pInfo->nodeFqdn); + if (ipv4 == 0xFFFFFFFF || ipv4 == 1) { + sError("failed to resolve ipv4 addr, fqdn:%s, wait one second", pInfo->nodeFqdn); + taosSsleep(1); + } + else{ + break; + } + } + if (ipv4 == 0xFFFFFFFF || ipv4 == 1) { sError("failed to resolve ipv4 addr, fqdn:%s", pInfo->nodeFqdn); terrno = TSDB_CODE_TSC_INVALID_FQDN; diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 01d23a7e96..e700ef3d0a 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -20,6 +20,7 @@ #include "tutil.h" #include "walInt.h" + bool FORCE_INLINE walLogExist(SWal* pWal, int64_t ver) { return !walIsEmpty(pWal) && walGetFirstVer(pWal) <= ver && walGetLastVer(pWal) >= ver; } @@ -53,7 +54,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { 
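Reviewer note: the syncUtil.c hunk above makes FQDN resolution retry up to `tsResolveFQDNRetryTime` times with a one-second sleep between failures, instead of failing on the first transient DNS error. A sketch of that bounded retry loop; `getaddrinfo` stands in for `taosGetIpv4FromFqdn`, and the error-code mapping is the caller's job:

```c
/* Sketch of the bounded resolve-with-retry loop (POSIX only). */
#include <netdb.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int resolveIpv4(const char *fqdn, uint32_t *out) {
  struct addrinfo hints = {0}, *res = NULL;
  hints.ai_family = AF_INET;
  if (getaddrinfo(fqdn, NULL, &hints, &res) != 0 || res == NULL) return -1;
  *out = ((struct sockaddr_in *)res->ai_addr)->sin_addr.s_addr;
  freeaddrinfo(res);
  return 0;
}

static int resolveWithRetry(const char *fqdn, int maxRetry, uint32_t *out) {
  for (int i = 0; i < maxRetry; ++i) {
    if (resolveIpv4(fqdn, out) == 0) return 0;
    fprintf(stderr, "resolve %s failed, wait one second (try %d/%d)\n", fqdn, i + 1, maxRetry);
    sleep(1); /* transient DNS hiccups at startup often recover */
  }
  return -1;  /* caller reports the invalid-FQDN error code */
}

int main(void) {
  uint32_t ip = 0;
  if (resolveWithRetry("localhost", 3, &ip) == 0) {
    struct in_addr a = {.s_addr = ip};
    printf("resolved: %s\n", inet_ntoa(a));
  }
  return 0;
}
```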
walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); TdFilePtr pFile = taosOpenFile(fnameStr, TD_FILE_READ | TD_FILE_WRITE); if (pFile == NULL) { @@ -304,7 +305,7 @@ int walRepairLogFileTs(SWal* pWal, bool* updateMeta) { walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); int32_t mtime = 0; - if (taosStatFile(fnameStr, NULL, &mtime) < 0) { + if (taosStatFile(fnameStr, NULL, &mtime, NULL) < 0) { terrno = TAOS_SYSTEM_ERROR(errno); wError("vgId:%d, failed to stat file due to %s, file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); return -1; @@ -353,7 +354,7 @@ int walTrimIdxFile(SWal* pWal, int32_t fileIdx) { walBuildIdxName(pWal, pFileInfo->firstVer, fnameStr); int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); int64_t records = TMAX(0, pFileInfo->lastVer - pFileInfo->firstVer + 1); int64_t lastEndOffset = records * sizeof(SWalIdxEntry); @@ -436,7 +437,7 @@ int walCheckAndRepairMeta(SWal* pWal) { SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, fileIdx); walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); - int32_t code = taosStatFile(fnameStr, &fileSize, NULL); + int32_t code = taosStatFile(fnameStr, &fileSize, NULL, NULL); if (code < 0) { terrno = TAOS_SYSTEM_ERROR(errno); wError("failed to stat file since %s. file:%s", terrstr(), fnameStr); @@ -522,7 +523,7 @@ int walCheckAndRepairIdxFile(SWal* pWal, int32_t fileIdx) { walBuildLogName(pWal, pFileInfo->firstVer, fLogNameStr); int64_t fileSize = 0; - if (taosStatFile(fnameStr, &fileSize, NULL) < 0 && errno != ENOENT) { + if (taosStatFile(fnameStr, &fileSize, NULL, NULL) < 0 && errno != ENOENT) { wError("vgId:%d, failed to stat file due to %s. 
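Reviewer note: every `taosStatFile` call site in walMeta.c above gains a trailing NULL for the new fourth (atime) out-parameter. The wrapper only fills out-parameters that are non-NULL, so existing callers stay correct by passing NULL. A sketch of that NULL-tolerant signature, with POSIX `stat()` standing in for the OS layer:

```c
/* Sketch of the 4-argument stat wrapper the patch migrates callers to:
 * every out-parameter is optional. */
#include <sys/stat.h>
#include <stdio.h>
#include <stdint.h>

static int statFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime) {
  struct stat st;
  if (stat(path, &st) < 0) return -1;
  if (size != NULL) *size = (int64_t)st.st_size;
  if (mtime != NULL) *mtime = (int32_t)st.st_mtime;
  if (atime != NULL) *atime = (int32_t)st.st_atime; /* the newly exposed field */
  return 0;
}

int main(void) {
  int64_t sz = 0;
  if (statFile("/etc/hosts", &sz, NULL, NULL) == 0) /* old-style call site */
    printf("size:%lld\n", (long long)sz);
  return 0;
}
```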
file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); terrno = TAOS_SYSTEM_ERROR(errno); return -1; @@ -935,7 +936,7 @@ int walLoadMeta(SWal* pWal) { walBuildMetaName(pWal, metaVer, fnameStr); // read metafile int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); if (fileSize == 0) { (void)taosRemoveFile(fnameStr); wDebug("vgId:%d, wal find empty meta ver %d", pWal->cfg.vgId, metaVer); diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index dd670595f0..c4309b2c55 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -191,7 +191,7 @@ int32_t taosRenameFile(const char *oldName, const char *newName) { #endif } -int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { +int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime) { #ifdef WINDOWS struct _stati64 fileStat; int32_t code = _stati64(path, &fileStat); @@ -211,6 +211,10 @@ int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { *mtime = fileStat.st_mtime; } + if (atime != NULL) { + *atime = fileStat.st_mtime; + } + return 0; } int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno) { @@ -540,7 +544,7 @@ int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime) { #ifdef WINDOWS struct __stat64 fileStat; - int32_t code = _fstat64(pFile->fd, &fileStat); + int32_t code = _fstat64(pFile->fd, &fileStat); #else struct stat fileStat; int32_t code = fstat(pFile->fd, &fileStat); @@ -897,17 +901,17 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName) { goto cmp_end; } - dstFp = gzdopen(pFile->fd, "wb6f"); - if (dstFp == NULL) { - ret = -3; - taosCloseFile(&pFile); - goto cmp_end; - } + dstFp = gzdopen(pFile->fd, "wb6f"); + if (dstFp == NULL) { + ret = -3; + taosCloseFile(&pFile); + goto cmp_end; + } - while (!feof(pSrcFile->fp)) { - len = (int32_t)fread(data, 1, compressSize, pSrcFile->fp); - (void)gzwrite(dstFp, data, len); - } + while (!feof(pSrcFile->fp)) { + len = (int32_t)fread(data, 1, compressSize, pSrcFile->fp); + (void)gzwrite(dstFp, data, len); + } cmp_end: if (pSrcFile) { diff --git a/source/os/src/osRand.c b/source/os/src/osRand.c index 83c36a422d..9cb6f6e52a 100644 --- a/source/os/src/osRand.c +++ b/source/os/src/osRand.c @@ -27,11 +27,11 @@ void taosSeedRand(uint32_t seed) { return srand(seed); } uint32_t taosRand(void) { #ifdef WINDOWS - unsigned int pSeed; - rand_s(&pSeed); - return pSeed; + unsigned int pSeed; + rand_s(&pSeed); + return pSeed; #else - return rand(); + return rand(); #endif } @@ -80,6 +80,15 @@ void taosRandStr(char* str, int32_t size) { const char* set = "abcdefghijklmnopqrstuvwxyz0123456789-_."; int32_t len = 39; + for (int32_t i = 0; i < size; ++i) { + str[i] = set[taosRand() % len]; + } +} + +void taosRandStr2(char* str, int32_t size) { + const char* set = "abcdefghijklmnopqrstuvwxyz0123456789"; + int32_t len = 36; + for (int32_t i = 0; i < size; ++i) { str[i] = set[taosRand() % len]; } diff --git a/source/util/src/terror.c b/source/util/src/terror.c index b0b407e2a5..466b9985e7 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -151,6 +151,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SHOWOBJ, "Data expired") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_QUERY_ID, "Invalid query id") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CONN_ID, "Invalid connection id") TAOS_DEFINE_ERROR(TSDB_CODE_MND_USER_DISABLED, "User is disabled") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_PLATFORM, "Unsupported feature on this platform") // 
mnode-sdb TAOS_DEFINE_ERROR(TSDB_CODE_SDB_OBJ_ALREADY_THERE, "Object already there") diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index de7ad848ed..4a15b5b976 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -17,8 +17,8 @@ #include "tlog.h" #include "os.h" #include "tconfig.h" -#include "tjson.h" #include "tglobal.h" +#include "tjson.h" #define LOG_MAX_LINE_SIZE (10024) #define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3) @@ -74,12 +74,12 @@ static SLogObj tsLogObj = {.fileNum = 1}; static int64_t tsAsyncLogLostLines = 0; static int32_t tsDaylightActive; /* Currently in daylight saving time. */ -bool tsLogEmbedded = 0; -bool tsAsyncLog = true; +bool tsLogEmbedded = 0; +bool tsAsyncLog = true; #ifdef ASSERT_NOT_CORE -bool tsAssert = false; +bool tsAssert = false; #else -bool tsAssert = true; +bool tsAssert = true; #endif int32_t tsNumOfLogLines = 10000000; int32_t tsLogKeepDays = 0; @@ -160,7 +160,7 @@ int32_t taosInitSlowLog() { tsLogObj.slowHandle = taosLogBuffNew(LOG_SLOW_BUF_SIZE); if (tsLogObj.slowHandle == NULL) return -1; - + taosUmaskFile(0); tsLogObj.slowHandle->pFile = taosOpenFile(fullName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); if (tsLogObj.slowHandle->pFile == NULL) { @@ -403,13 +403,13 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { strcpy(name, fn); strcat(name, ".0"); } - bool log0Exist = taosStatFile(name, NULL, &logstat0_mtime) >= 0; + bool log0Exist = taosStatFile(name, NULL, &logstat0_mtime, NULL) >= 0; if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { strcpy(name, fn); strcat(name, ".1"); } - bool log1Exist = taosStatFile(name, NULL, &logstat1_mtime) >= 0; + bool log1Exist = taosStatFile(name, NULL, &logstat1_mtime, NULL) >= 0; // if none of the log files exist, open 0, if both exists, open the old one if (!log0Exist && !log1Exist) { @@ -576,7 +576,7 @@ void taosPrintSlowLog(const char *format, ...) 
{ } else { taosWriteFile(tsLogObj.slowHandle->pFile, buffer, len); } - + taosMemoryFree(buffer); } @@ -769,12 +769,12 @@ static void taosWriteLog(SLogBuff *pLogBuf) { static void *taosAsyncOutputLog(void *param) { SLogBuff *pLogBuf = (SLogBuff *)tsLogObj.logHandle; SLogBuff *pSlowBuf = (SLogBuff *)tsLogObj.slowHandle; - + setThreadName("log"); int32_t count = 0; int32_t updateCron = 0; int32_t writeInterval = 0; - + while (1) { writeInterval = TMIN(pLogBuf->writeInterval, pSlowBuf->writeInterval); count += writeInterval; @@ -834,12 +834,12 @@ bool taosAssertDebug(bool condition, const char *file, int32_t line, const char return true; } -void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, void *sigInfo) { +void taosLogCrashInfo(char *nodeType, char *pMsg, int64_t msgLen, int signum, void *sigInfo) { const char *flags = "UTL FATAL "; ELogLevel level = DEBUG_FATAL; int32_t dflag = 255; - char filepath[PATH_MAX] = {0}; - TdFilePtr pFile = NULL; + char filepath[PATH_MAX] = {0}; + TdFilePtr pFile = NULL; if (pMsg && msgLen > 0) { snprintf(filepath, sizeof(filepath), "%s%s.%sCrashLog", tsLogDir, TD_DIRSEP, nodeType); @@ -856,16 +856,16 @@ void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, vo int64_t writeSize = taosWriteFile(pFile, &msgLen, sizeof(msgLen)); if (sizeof(msgLen) != writeSize) { taosUnLockFile(pFile); - taosPrintLog(flags, level, dflag, "failed to write len to file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", - filepath, pFile, writeSize, sizeof(msgLen), terrstr()); + taosPrintLog(flags, level, dflag, "failed to write len to file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", + filepath, pFile, writeSize, sizeof(msgLen), terrstr()); goto _return; } writeSize = taosWriteFile(pFile, pMsg, msgLen); if (msgLen != writeSize) { taosUnLockFile(pFile); - taosPrintLog(flags, level, dflag, "failed to write file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", - filepath, pFile, writeSize, msgLen, terrstr()); + taosPrintLog(flags, level, dflag, "failed to write file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", + filepath, pFile, writeSize, msgLen, terrstr()); goto _return; } @@ -883,7 +883,7 @@ _return: taosPrintTrace(flags, level, dflag, 4); #elif !defined(WINDOWS) taosPrintLog(flags, level, dflag, "sender PID:%d cmdline:%s", ((siginfo_t *)sigInfo)->si_pid, - taosGetCmdlineByPID(((siginfo_t *)sigInfo)->si_pid)); + taosGetCmdlineByPID(((siginfo_t *)sigInfo)->si_pid)); taosPrintTrace(flags, level, dflag, 3); #else taosPrintTrace(flags, level, dflag, 8); @@ -892,17 +892,17 @@ _return: taosMemoryFree(pMsg); } -void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* pFd) { +void taosReadCrashInfo(char *filepath, char **pMsg, int64_t *pMsgLen, TdFilePtr *pFd) { const char *flags = "UTL FATAL "; ELogLevel level = DEBUG_FATAL; int32_t dflag = 255; TdFilePtr pFile = NULL; bool truncateFile = false; - char* buf = NULL; + char *buf = NULL; if (NULL == *pFd) { int64_t filesize = 0; - if (taosStatFile(filepath, &filesize, NULL) < 0) { + if (taosStatFile(filepath, &filesize, NULL, NULL) < 0) { if (ENOENT == errno) { return; } @@ -916,7 +916,7 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* return; } - pFile = taosOpenFile(filepath, TD_FILE_READ|TD_FILE_WRITE); + pFile = taosOpenFile(filepath, TD_FILE_READ | TD_FILE_WRITE); if (pFile == NULL) { if (ENOENT == errno) { return; @@ -926,7 +926,7 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* taosPrintLog(flags, 
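Reviewer note: `taosLogCrashInfo` above appends each crash record as a length header followed by the payload, while holding a file lock so writers from different processes cannot interleave records. A minimal sketch of that record format; `flock()` stands in for `taosLockFile`, and the path is illustrative:

```c
/* Sketch of the length-prefixed crash record append under an exclusive
 * file lock. A short write leaves a torn record the reader must detect. */
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/file.h>

static int appendRecord(const char *path, const void *msg, int64_t msgLen) {
  int fd = open(path, O_WRONLY | O_CREAT | O_APPEND, 0644);
  if (fd < 0) return -1;
  if (flock(fd, LOCK_EX) < 0) { close(fd); return -1; }

  int ok = write(fd, &msgLen, sizeof(msgLen)) == (ssize_t)sizeof(msgLen) &&
           write(fd, msg, (size_t)msgLen) == (ssize_t)msgLen;

  flock(fd, LOCK_UN);
  close(fd);
  return ok ? 0 : -1;
}

int main(void) {
  const char *msg = "stack trace ...";
  return appendRecord("/tmp/demo.CrashLog", msg, (int64_t)strlen(msg));
}
```

The matching reader (`taosReadCrashInfo` below) reads the header, then the payload, and truncates the file when either read comes up short, which is exactly how a torn record from a crash mid-write is discarded.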
level, dflag, "failed to open file:%s since %s", filepath, terrstr()); return; } - + taosLockFile(pFile); } else { pFile = *pFd; @@ -937,8 +937,8 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* if (sizeof(msgLen) != readSize) { truncateFile = true; if (readSize < 0) { - taosPrintLog(flags, level, dflag, "failed to read len from file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", - filepath, pFile, readSize, sizeof(msgLen), terrstr()); + taosPrintLog(flags, level, dflag, "failed to read len from file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", + filepath, pFile, readSize, sizeof(msgLen), terrstr()); } goto _return; } @@ -948,12 +948,12 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* taosPrintLog(flags, level, dflag, "failed to malloc buf, size:%" PRId64, msgLen); goto _return; } - + readSize = taosReadFile(pFile, buf, msgLen); if (msgLen != readSize) { truncateFile = true; - taosPrintLog(flags, level, dflag, "failed to read file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", - filepath, pFile, readSize, msgLen, terrstr()); + taosPrintLog(flags, level, dflag, "failed to read file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", filepath, + pFile, readSize, msgLen, terrstr()); goto _return; } @@ -981,7 +981,7 @@ void taosReleaseCrashLogFile(TdFilePtr pFile, bool truncateFile) { if (truncateFile) { taosFtruncateFile(pFile, 0); } - + taosUnLockFile(pFile); taosCloseFile(&pFile); } diff --git a/tests/develop-test/2-query/table_count_scan.py b/tests/develop-test/2-query/table_count_scan.py index 758d28948d..d6e49964eb 100644 --- a/tests/develop-test/2-query/table_count_scan.py +++ b/tests/develop-test/2-query/table_count_scan.py @@ -65,7 +65,7 @@ class TDTestCase: tdSql.query('select count(*),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;') tdSql.checkRows(3) - tdSql.checkData(0, 0, 24) + tdSql.checkData(0, 0, 23) tdSql.checkData(0, 1, 'information_schema') tdSql.checkData(0, 2, None) tdSql.checkData(1, 0, 3) @@ -77,7 +77,7 @@ class TDTestCase: tdSql.query('select count(1) v,db_name, stable_name from information_schema.ins_tables group by db_name, stable_name order by v desc;') tdSql.checkRows(3) - tdSql.checkData(0, 0, 24) + tdSql.checkData(0, 0, 23) tdSql.checkData(0, 1, 'information_schema') tdSql.checkData(0, 2, None) tdSql.checkData(1, 0, 5) @@ -93,7 +93,7 @@ class TDTestCase: tdSql.checkData(1, 1, 'performance_schema') tdSql.checkData(0, 0, 3) tdSql.checkData(0, 1, 'tbl_count') - tdSql.checkData(2, 0, 24) + tdSql.checkData(2, 0, 23) tdSql.checkData(2, 1, 'information_schema') tdSql.query("select count(*) from information_schema.ins_tables where db_name='tbl_count'") @@ -106,7 +106,7 @@ class TDTestCase: tdSql.query('select count(*) from information_schema.ins_tables') tdSql.checkRows(1) - tdSql.checkData(0, 0, 32) + tdSql.checkData(0, 0, 31) tdSql.execute('create table stba (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);') @@ -189,7 +189,7 @@ class TDTestCase: tdSql.checkData(2, 0, 5) tdSql.checkData(2, 1, 'performance_schema') tdSql.checkData(2, 2, None) - tdSql.checkData(3, 0, 24) + tdSql.checkData(3, 0, 23) tdSql.checkData(3, 1, 'information_schema') tdSql.checkData(3, 2, None) @@ -204,7 +204,7 @@ class TDTestCase: tdSql.checkData(2, 0, 5) tdSql.checkData(2, 1, 'performance_schema') 
tdSql.checkData(2, 2, None) - tdSql.checkData(3, 0, 24) + tdSql.checkData(3, 0, 23) tdSql.checkData(3, 1, 'information_schema') tdSql.checkData(3, 2, None) @@ -215,7 +215,7 @@ class TDTestCase: tdSql.checkData(0, 1, 'tbl_count') tdSql.checkData(1, 0, 5) tdSql.checkData(1, 1, 'performance_schema') - tdSql.checkData(2, 0, 24) + tdSql.checkData(2, 0, 23) tdSql.checkData(2, 1, 'information_schema') tdSql.query("select count(*) from information_schema.ins_tables where db_name='tbl_count'") @@ -228,7 +228,7 @@ class TDTestCase: tdSql.query('select count(*) from information_schema.ins_tables') tdSql.checkRows(1) - tdSql.checkData(0, 0, 33) + tdSql.checkData(0, 0, 32) tdSql.execute('drop database tbl_count') diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index a946a7feaf..741226f101 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -456,7 +456,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3 ,,n,system-test,python3 ./test.py -f 6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py -N 6 -M 3 -#,,n,system-test,python ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 +,,n,system-test,python3 ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 @@ -1204,6 +1204,9 @@ ,,y,script,./test.sh -f tsim/tag/drop_tag.sim ,,y,script,./test.sh -f tsim/tag/tbNameIn.sim ,,y,script,./test.sh -f tmp/monitor.sim +,,y,script,./test.sh -f tsim/tagindex/add_index.sim +,,n,script,./test.sh -f tsim/tagindex/sma_and_tag_index.sim + #develop test ,,n,develop-test,python3 ./test.py -f 2-query/table_count_scan.py diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 5ae061072a..f5e426057e 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -60,6 +60,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ + -v /root/.cos-local.1:/root/.cos-local.1 \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ @@ -88,6 +89,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ + -v /root/.cos-local.1:/root/.cos-local.1 \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ diff --git a/tests/script/tsim/query/sys_tbname.sim b/tests/script/tsim/query/sys_tbname.sim index f49a8e0a7d..6ed978aa15 100644 --- a/tests/script/tsim/query/sys_tbname.sim +++ b/tests/script/tsim/query/sys_tbname.sim @@ -58,7 +58,7 @@ endi sql select tbname from information_schema.ins_tables; 
print $rows $data00 -if $rows != 33 then +if $rows != 32 then return -1 endi if $data00 != @ins_tables@ then diff --git a/tests/script/tsim/query/tableCount.sim b/tests/script/tsim/query/tableCount.sim index 6e65852dcc..524b43620c 100644 --- a/tests/script/tsim/query/tableCount.sim +++ b/tests/script/tsim/query/tableCount.sim @@ -53,7 +53,7 @@ sql select stable_name,count(table_name) from information_schema.ins_tables grou if $rows != 3 then return -1 endi -if $data01 != 30 then +if $data01 != 29 then return -1 endi if $data11 != 10 then @@ -72,7 +72,7 @@ endi if $data11 != 5 then return -1 endi -if $data21 != 24 then +if $data21 != 23 then return -1 endi if $data31 != 5 then @@ -97,7 +97,7 @@ endi if $data42 != 3 then return -1 endi -if $data52 != 24 then +if $data52 != 23 then return -1 endi if $data62 != 5 then diff --git a/tests/script/tsim/sma/drop_sma.sim b/tests/script/tsim/sma/drop_sma.sim index 8fd8ebdcfd..fcf48f2b36 100644 --- a/tests/script/tsim/sma/drop_sma.sim +++ b/tests/script/tsim/sma/drop_sma.sim @@ -52,19 +52,35 @@ sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) in print --> show sma sql show indexes from stb from d1; -if $rows != 1 then +if $rows != 2 then return -1 endi -if $data[0][0] != sma_index_name1 then - return -1 -endi -if $data[0][1] != d1 then - return -1 -endi -if $data[0][2] != stb then - return -1 + +if $data[0][6] == tag_index then + if $data[1][0] != sma_index_name1 then + return -1 + endi + if $data[1][1] != d1 then + return -1 + endi + if $data[1][2] != stb then + return -1 + endi +else + if $data[0][0] != sma_index_name1 then + return -1 + endi + if $data[0][1] != d1 then + return -1 + endi + if $data[0][2] != stb then + return -1 + endi endi + + + print --> drop stb sql drop table stb; @@ -78,17 +94,30 @@ sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) in print --> show sma sql show indexes from stb from d1; -if $rows != 1 then +if $rows != 2 then return -1 endi -if $data[0][0] != sma_index_name1 then - return -1 -endi -if $data[0][1] != d1 then - return -1 -endi -if $data[0][2] != stb then - return -1 + +if $data[0][6] == tag_index then + if $data[1][0] != sma_index_name1 then + return -1 + endi + if $data[1][1] != d1 then + return -1 + endi + if $data[1][2] != stb then + return -1 + endi +else + if $data[0][0] != sma_index_name1 then + return -1 + endi + if $data[0][1] != d1 then + return -1 + endi + if $data[0][2] != stb then + return -1 + endi endi print --> drop stb diff --git a/tests/script/tsim/tagindex/add_index.sim b/tests/script/tsim/tagindex/add_index.sim index cfbec90542..a6e9cae670 100644 --- a/tests/script/tsim/tagindex/add_index.sim +++ b/tests/script/tsim/tagindex/add_index.sim @@ -7,7 +7,7 @@ print ======== step0 $dbPrefix = ta_3_db $tbPrefix = ta_3_tb $mtPrefix = ta_3_mt -$tbNum = 500 +$tbNum = 50 $rowNum = 20 $totalNum = 200 @@ -48,12 +48,16 @@ while $i < $tbNum $i = $i + 1 endw - +sql_error create index ti1 on $mtPrefix (t1) sql create index ti2 on $mtPrefix (t2) sql create index ti5 on $mtPrefix (t5) print ==== test name conflict # + +sql_error create index ti1 on $mtPrefix(t1) +sql_error create index ti11 on $mtPrefix(t1) + sql_error create index ti3 on $mtPrefix(t2) sql_error create index ti2 on $mtPrefix(t2) @@ -73,6 +77,15 @@ while $i < $tbNum $i = $i + 1 endw +$i = 0 +while $i < $tbNum + sql select * from $mtPrefix where t1= $i ; + if $rows != 1 then + return -1 + endi + $i = $i + 1 +endw + print ===== test operator great equal @@ -250,7 +263,7 @@ endw print === show 
index sql select * from information_schema.ins_indexes -if $rows != 1 then +if $rows != 2 then return -1 endi @@ -259,12 +272,41 @@ print === drop index ti2 sql drop index ti2 print === drop not exist index + +sql select * from information_schema.ins_indexes +if $rows != 1 then + return -1 +endi + +sql drop index $data[0][0] + +if $rows != 0 then + return -1 +endi + + sql_error drop index t2 sql_error drop index t3 +sql create index ti0 on $mtPrefix (t1) + +$i = $interval +while $i < $limit + sql select * from $mtPrefix where t1 <= $i ; + + $tmp = $i - $interval + $tmp = $tmp + 1 + if $rows != $tmp then + return -1 + endi + $i = $i + 1 +endw sql_error create index ti0 on $mtPrefix (t1) +sql_error create index ti2 on $mtPrefix (t1) + + sql_error create index t2i on ta_3_tb17 (t2) diff --git a/tests/script/tsim/tagindex/sma_and_tag_index.sim b/tests/script/tsim/tagindex/sma_and_tag_index.sim index b15d22d439..e7e4682810 100644 --- a/tests/script/tsim/tagindex/sma_and_tag_index.sim +++ b/tests/script/tsim/tagindex/sma_and_tag_index.sim @@ -69,7 +69,7 @@ sql create sma index smat2i on $mtPrefix function(max(c1)) interval(6m,10s) slid sql select * from information_schema.ins_indexes -if $rows != 2 then +if $rows != 3 then return -1 endi @@ -84,7 +84,7 @@ while $i < 5 endw sql select * from information_schema.ins_indexes -if $rows != 6 then +if $rows != 7 then return -1 endi @@ -114,13 +114,13 @@ sql use $dbPrefix sql create table if not exists $mtPrefix (ts timestamp, c1 int) tags (t1 int, t2 int, t3 int, t4 int, t5 int) sql create index tagt2i on $mtPrefix (t2) sql select * from information_schema.ins_indexes -if $rows != 1 then +if $rows != 2 then return -1 endi sql alter table $mtPrefix drop tag t2 sql select * from information_schema.ins_indexes -if $rows != 0 then +if $rows != 1 then return -1 endi @@ -128,18 +128,22 @@ endi print ==== rename tag name, and update index colName sql create index tagt3i on $mtPrefix (t3) sql select * from information_schema.ins_indexes -if $rows != 1 then +if $rows != 2 then return -1 endi sql alter table $mtPrefix rename tag t3 txxx sql select * from information_schema.ins_indexes -if $rows != 1 then +if $rows != 2 then return -1 endi -if $data05 != txxx then - return -1 +if $data05 == txxx then + print "manual created index" +elif $data15 == txxx then + print "auto created index at tag0" +else + return -1; endi @@ -153,7 +157,7 @@ sql create table if not exists $mtPrefix (ts timestamp, c1 int) tags (t1 int, t2 sql create index tagt3i on $mtPrefix (t3) sql select * from information_schema.ins_indexes -if $rows != 2 then +if $rows != 4 then return -1 endi diff --git a/tests/script/wtest.bat b/tests/script/wtest.bat index b642bad285..88ae703b7c 100644 --- a/tests/script/wtest.bat +++ b/tests/script/wtest.bat @@ -17,6 +17,9 @@ rem echo SIM_DIR: %SIM_DIR% set "TSIM_DIR=%SIM_DIR%tsim\" rem echo TSIM_DIR: %TSIM_DIR% +set "DATA_DIR=%TSIM_DIR%data\" +rem echo DATA_DIR: %DATA_DIR% + set "CFG_DIR=%TSIM_DIR%cfg\" rem echo CFG_DIR: %CFG_DIR% @@ -30,25 +33,30 @@ if not exist %SIM_DIR% mkdir %SIM_DIR% if not exist %TSIM_DIR% mkdir %TSIM_DIR% if exist %CFG_DIR% rmdir /s/q %CFG_DIR% if exist %LOG_DIR% rmdir /s/q %LOG_DIR% +if exist %DATA_DIR% rmdir /s/q %DATA_DIR% if not exist %CFG_DIR% mkdir %CFG_DIR% if not exist %LOG_DIR% mkdir %LOG_DIR% +if not exist %DATA_DIR% mkdir %DATA_DIR% set "fqdn=localhost" for /f "skip=1" %%A in ( 'wmic computersystem get caption' ) do if not defined fqdn set "fqdn=%%A" -echo firstEp %fqdn% > %TAOS_CFG% +echo firstEp %fqdn%:7100 > %TAOS_CFG% 
+echo secondEp %fqdn%:7200 >> %TAOS_CFG% echo fqdn %fqdn% >> %TAOS_CFG% echo serverPort 7100 >> %TAOS_CFG% +echo dataDir %DATA_DIR% >> %TAOS_CFG% echo logDir %LOG_DIR% >> %TAOS_CFG% echo scriptDir %SCRIPT_DIR% >> %TAOS_CFG% echo numOfLogLines 100000000 >> %TAOS_CFG% echo rpcDebugFlag 143 >> %TAOS_CFG% echo tmrDebugFlag 131 >> %TAOS_CFG% -echo cDebugFlag 135 >> %TAOS_CFG% +echo cDebugFlag 143 >> %TAOS_CFG% echo qDebugFlag 143 >> %TAOS_CFG% -echo udebugFlag 135 >> %TAOS_CFG% +echo uDebugFlag 143 >> %TAOS_CFG% +echo debugFlag 143 >> %TAOS_CFG% echo wal 0 >> %TAOS_CFG% echo asyncLog 0 >> %TAOS_CFG% echo locale en_US.UTF-8 >> %TAOS_CFG% diff --git a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py index 762361f051..f7788d1d50 100644 --- a/tests/system-test/0-others/information_schema.py +++ b/tests/system-test/0-others/information_schema.py @@ -55,7 +55,7 @@ class TDTestCase: ] self.binary_str = 'taosdata' self.nchar_str = '涛思数据' - self.ins_list = ['ins_dnodes','ins_mnodes','ins_modules','ins_qnodes','ins_snodes','ins_cluster','ins_databases','ins_functions',\ + self.ins_list = ['ins_dnodes','ins_mnodes','ins_qnodes','ins_snodes','ins_cluster','ins_databases','ins_functions',\ 'ins_indexes','ins_stables','ins_tables','ins_tags','ins_columns','ins_users','ins_grants','ins_vgroups','ins_configs','ins_dnode_variables',\ 'ins_topics','ins_subscriptions','ins_streams','ins_stream_tasks','ins_vnodes','ins_user_privileges'] self.perf_list = ['perf_connections','perf_queries','perf_consumers','perf_trans','perf_apps'] diff --git a/tests/system-test/0-others/show_tag_index.py b/tests/system-test/0-others/show_tag_index.py index 663426b7ff..d39f9eaab9 100644 --- a/tests/system-test/0-others/show_tag_index.py +++ b/tests/system-test/0-others/show_tag_index.py @@ -59,14 +59,18 @@ class TDTestCase: tdSql.checkData(1, 2, 2) def check_indexes(self): - tdSql.checkRows(1) - tdSql.checkCols(7) - tdSql.checkData(0, 0, 'idx1') - tdSql.checkData(0, 1, 'db') - tdSql.checkData(0, 2, 'stb') - tdSql.checkData(0, 3, None) - tdSql.checkData(0, 5, 't1') - tdSql.checkData(0, 6, 'tag_index') + tdSql.checkRows(2) + for i in range(2): + col_name = tdSql.getData(i, 5) + if col_name == "t0": + continue + tdSql.checkCols(7) + tdSql.checkData(i, 0, 'idx1') + tdSql.checkData(i, 1, 'db') + tdSql.checkData(i, 2, 'stb') + tdSql.checkData(i, 3, None) + tdSql.checkData(i, 5, 't1') + tdSql.checkData(i, 6, 'tag_index') def run(self): tdSql.execute(f'create database db') diff --git a/tests/system-test/0-others/tag_index_basic.py b/tests/system-test/0-others/tag_index_basic.py index 72ed559ffd..c1e1d521d2 100644 --- a/tests/system-test/0-others/tag_index_basic.py +++ b/tests/system-test/0-others/tag_index_basic.py @@ -118,12 +118,15 @@ class TDTestCase: def show_tagidx(self, stbname): sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="db"' tdSql.query(sql) - rows = len(self.tag_dict.keys())-1 + rows = len(self.tag_dict.keys()) tdSql.checkRows(rows) for i in range(rows): col_name = tdSql.getData(i, 1) idx_name = f'idx_{col_name}' + # skip first tag + if col_name == "t1": + continue tdSql.checkData(i, 0, idx_name) tdLog.info(f' show {rows} tag indexs ok.') @@ -201,7 +204,7 @@ class TDTestCase: # check idx result is 0 sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="db"' tdSql.query(sql) - tdSql.checkRows(0) + tdSql.checkRows(1) tdLog.info(f' drop {cnt} tag indexs ok.') # create long name idx diff --git 
a/tests/system-test/0-others/walRetention.py b/tests/system-test/0-others/walRetention.py index 2b340b7969..5257b7644a 100644 --- a/tests/system-test/0-others/walRetention.py +++ b/tests/system-test/0-others/walRetention.py @@ -460,8 +460,7 @@ class TDTestCase: #self.test_db("db2", 5, 10*24*3600, 2*1024) # 2M size # period + size - self.test_db("db", checkTime = 5*60, wal_period = 60, wal_size_kb=10) - #self.test_db("db", checkTime = 3*60, wal_period = 0, wal_size_kb=0) + self.test_db("db", checkTime = 3*60, wal_period = 60, wal_size_kb=500) def stop(self): diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 986c63839b..c2eb7bee2e 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -20,6 +20,7 @@ class TDTestCase: tbname = "tb" tbname1 = "tb1" tbname2 = "tb2" + tbname3 = "tb3" stbname = "stb" ctbname1 = "ctb1" ctbname2 = "ctb2" @@ -5607,6 +5608,44 @@ class TDTestCase: tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:06') fill(linear)") tdSql.checkRows(0) + #### TS-3799 #### + + tdSql.execute( + f'''create table if not exists {dbname}.{tbname3} (ts timestamp, c0 double)''' + ) + + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:51.000000000', 4.233947800000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:52.000000000', 3.606781000000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:52.500000000', 3.162353500000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:53.000000000', 3.162292500000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:53.500000000', 4.998230000000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:54.400000000', 8.800414999999999)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:54.900000000', 8.853271500000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:55.900000000', 7.507751500000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:56.400000000', 7.510681000000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:56.900000000', 7.841614000000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:57.900000000', 8.153809000000001)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:58.500000000', 6.866455000000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:59.000000000', 6.869140600000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-07 00:00:00.000000000', 0.261475000000001)") + + tdSql.query(f"select _irowts, interp(c0) from {dbname}.{tbname3} range('2023-08-06 23:59:00','2023-08-06 23:59:59') every(1m) fill(next)") + tdSql.checkRows(1); + tdSql.checkData(0, 0, '2023-08-06 23:59:00') + tdSql.checkData(0, 1, 4.233947800000000) + + tdSql.query(f"select _irowts, interp(c0) from {dbname}.{tbname3} range('2023-08-06 23:59:00','2023-08-06 23:59:59') every(1m) fill(value, 1)") + tdSql.checkRows(1); + tdSql.checkData(0, 0, '2023-08-06 23:59:00') + tdSql.checkData(0, 1, 1) + + tdSql.query(f"select _irowts, interp(c0) from {dbname}.{tbname3} range('2023-08-06 23:59:00','2023-08-06 23:59:59') every(1m) fill(null)") + tdSql.checkRows(1); + tdSql.checkData(0, 0, '2023-08-06 23:59:00') + tdSql.checkData(0, 1, None) + + 
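Reviewer note: the TS-3799 cases above pin down `interp()` fill behavior for an interpolation point that precedes the first data row: `fill(next)` takes the first sample at or after the point, `fill(value, 1)` returns the literal, and `fill(null)` returns NULL. A sketch of the `fill(next)` lookup those cases exercise; the sample data mirrors the test but is illustrative:

```c
/* Sketch of fill(next): for a point with no exact match, take the first
 * sample at or after it. */
#include <stdio.h>
#include <stdint.h>
#include <math.h>

typedef struct { int64_t ts; double v; } Sample;

/* returns 1 and sets *out when a sample exists at/after ts */
static int fillNext(const Sample *rows, int n, int64_t ts, double *out) {
  for (int i = 0; i < n; ++i) {
    if (rows[i].ts >= ts) { *out = rows[i].v; return 1; }
  }
  return 0; /* nothing after ts: fill(null)/fill(value) decide the output */
}

int main(void) {
  Sample rows[] = {{51000, 4.2339478}, {52000, 3.606781}, {60000, 0.261475}};
  double v = NAN;
  if (fillNext(rows, 3, 0, &v))    /* point before the first row */
    printf("interp -> %.7f\n", v); /* 4.2339478, as the test expects */
  return 0;
}
```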
+ def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py index b3aeb72194..cae012ece1 100644 --- a/tests/system-test/2-query/sml.py +++ b/tests/system-test/2-query/sml.py @@ -110,6 +110,11 @@ class TDTestCase: tdSql.query(f"select * from ts3724.`stb2.`") tdSql.checkRows(1) + + # tdSql.query(f"select * from td24559.stb order by _ts") + # tdSql.checkRows(4) + # tdSql.checkData(0, 2, "POINT (4.343000 89.342000)") + # tdSql.checkData(3, 2, "GEOMETRYCOLLECTION (MULTIPOINT ((0.000000 0.000000), (1.000000 1.000000)), POINT (3.000000 4.000000), LINESTRING (2.000000 3.000000, 3.000000 4.000000))") return def run(self): diff --git a/tests/system-test/6-cluster/5dnode3mnodeRoll.py b/tests/system-test/6-cluster/5dnode3mnodeRoll.py index 38ac47f777..9d62eb3b4b 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRoll.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRoll.py @@ -27,7 +27,7 @@ import threading import time import json -BASEVERSION = "3.1.0.0" +BASEVERSION = "3.1.1.0" class TDTestCase: diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 6a42c208c7..8705d7ae38 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -18,9 +18,9 @@ #define _GNU_SOURCE #define _XOPEN_SOURCE #define _DEFAULT_SOURCE -#include "shellInt.h" -#include "shellAuto.h" #include "geosWrapper.h" +#include "shellAuto.h" +#include "shellInt.h" static bool shellIsEmptyCommand(const char *cmd); static int32_t shellRunSingleCommand(char *command); @@ -41,9 +41,9 @@ static bool shellIsCommentLine(char *line); static void shellSourceFile(const char *file); static void shellGetGrantInfo(); -static void shellCleanup(void *arg); -static void *shellCancelHandler(void *arg); -static void *shellThreadLoop(void *arg); +static void shellCleanup(void *arg); +static void *shellCancelHandler(void *arg); +static void *shellThreadLoop(void *arg); bool shellIsEmptyCommand(const char *cmd) { for (char c = *cmd++; c != 0; c = *cmd++) { @@ -66,7 +66,7 @@ int32_t shellRunSingleCommand(char *command) { if (shellRegexMatch(command, "^[\t ]*clear[ \t;]*$", REG_EXTENDED | REG_ICASE)) { #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-result" - system("clear"); + system("clear"); #pragma GCC diagnostic pop return 0; } @@ -142,8 +142,8 @@ int32_t shellRunCommand(char *command, bool recordHistory) { return 0; } - // add help or help; - if(strncasecmp(command, "help;", 5) == 0) { + // add help or help; + if (strncasecmp(command, "help;", 5) == 0) { showHelp(); return 0; } @@ -223,14 +223,14 @@ void shellRunSingleCommandImp(char *command) { } // pre string - char * pre = "Query OK"; + char *pre = "Query OK"; if (shellRegexMatch(command, "^\\s*delete\\s*from\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Delete OK"; - } else if(shellRegexMatch(command, "^\\s*insert\\s*into\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*insert\\s*into\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Insert OK"; - } else if(shellRegexMatch(command, "^\\s*create\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*create\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Create OK"; - } else if(shellRegexMatch(command, "^\\s*drop\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*drop\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Drop OK"; } @@ -295,7 +295,7 @@ char *shellFormatTimestamp(char *buf, int64_t val, int32_t 
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index 6a42c208c7..8705d7ae38 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -18,9 +18,9 @@
 #define _GNU_SOURCE
 #define _XOPEN_SOURCE
 #define _DEFAULT_SOURCE
-#include "shellInt.h"
-#include "shellAuto.h"
 #include "geosWrapper.h"
+#include "shellAuto.h"
+#include "shellInt.h"
 
 static bool shellIsEmptyCommand(const char *cmd);
 static int32_t shellRunSingleCommand(char *command);
@@ -41,9 +41,9 @@ static bool shellIsCommentLine(char *line);
 static void shellSourceFile(const char *file);
 static void shellGetGrantInfo();
 
-static void shellCleanup(void *arg);
-static void *shellCancelHandler(void *arg);
-static void *shellThreadLoop(void *arg);
+static void  shellCleanup(void *arg);
+static void *shellCancelHandler(void *arg);
+static void *shellThreadLoop(void *arg);
 
 bool shellIsEmptyCommand(const char *cmd) {
   for (char c = *cmd++; c != 0; c = *cmd++) {
@@ -66,7 +66,7 @@ int32_t shellRunSingleCommand(char *command) {
   if (shellRegexMatch(command, "^[\t ]*clear[ \t;]*$", REG_EXTENDED | REG_ICASE)) {
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wunused-result"
-    system("clear");
+    system("clear");
 #pragma GCC diagnostic pop
     return 0;
   }
@@ -142,8 +142,8 @@ int32_t shellRunCommand(char *command, bool recordHistory) {
     return 0;
   }
 
-  // add help or help;
-  if(strncasecmp(command, "help;", 5) == 0) {
+  // add help or help;
+  if (strncasecmp(command, "help;", 5) == 0) {
     showHelp();
     return 0;
   }
@@ -223,14 +223,14 @@ void shellRunSingleCommandImp(char *command) {
   }
 
   // pre string
-  char * pre = "Query OK";
+  char *pre = "Query OK";
   if (shellRegexMatch(command, "^\\s*delete\\s*from\\s*.*", REG_EXTENDED | REG_ICASE)) {
     pre = "Delete OK";
-  } else if(shellRegexMatch(command, "^\\s*insert\\s*into\\s*.*", REG_EXTENDED | REG_ICASE)) {
+  } else if (shellRegexMatch(command, "^\\s*insert\\s*into\\s*.*", REG_EXTENDED | REG_ICASE)) {
     pre = "Insert OK";
-  } else if(shellRegexMatch(command, "^\\s*create\\s*.*", REG_EXTENDED | REG_ICASE)) {
+  } else if (shellRegexMatch(command, "^\\s*create\\s*.*", REG_EXTENDED | REG_ICASE)) {
     pre = "Create OK";
-  } else if(shellRegexMatch(command, "^\\s*drop\\s*.*", REG_EXTENDED | REG_ICASE)) {
+  } else if (shellRegexMatch(command, "^\\s*drop\\s*.*", REG_EXTENDED | REG_ICASE)) {
     pre = "Drop OK";
   }
@@ -295,7 +295,7 @@ char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) {
   if (taosLocalTime(&tt, &ptm, buf) == NULL) {
     return buf;
   }
-  size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm);
+  size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm);
 
   if (precision == TSDB_TIME_PRECISION_NANO) {
     sprintf(buf + pos, ".%09d", ms);
@@ -388,22 +388,20 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i
     case TSDB_DATA_TYPE_BINARY:
     case TSDB_DATA_TYPE_VARBINARY:
     case TSDB_DATA_TYPE_NCHAR:
-    case TSDB_DATA_TYPE_JSON:
-      {
-        int32_t bufIndex = 0;
-        for (int32_t i = 0; i < length; i++) {
+    case TSDB_DATA_TYPE_JSON: {
+      int32_t bufIndex = 0;
+      for (int32_t i = 0; i < length; i++) {
+        buf[bufIndex] = val[i];
+        bufIndex++;
+        if (val[i] == '\"') {
           buf[bufIndex] = val[i];
           bufIndex++;
-          if (val[i] == '\"') {
-            buf[bufIndex] = val[i];
-            bufIndex++;
-          }
         }
-        buf[bufIndex] = 0;
-
-        taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr);
       }
-      break;
+      buf[bufIndex] = 0;
+
+      taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr);
+    } break;
     case TSDB_DATA_TYPE_GEOMETRY:
       shellDumpHexValue(buf, val, length);
       taosFprintfFile(pFile, "%s", buf);
@@ -536,12 +534,10 @@ void shellPrintString(const char *str, int32_t width) {
 
   if (width == 0) {
     printf("%s", str);
-  }
-  else if (len > width) {
+  } else if (len > width) {
     if (width <= 3) {
       printf("%.*s.", width - 1, str);
-    }
-    else {
+    } else {
       printf("%.*s...", width - 3, str);
     }
   } else {
@@ -550,7 +546,7 @@ void shellPrintString(const char *str, int32_t width) {
 }
 
 void shellPrintGeometry(const unsigned char *val, int32_t length, int32_t width) {
-  if (length == 0) {  //empty value
+  if (length == 0) {  // empty value
     shellPrintString("", width);
     return;
   }
@@ -566,7 +562,7 @@ void shellPrintGeometry(const unsigned char *val, int32_t length, int32_t width)
 
   char *outputWKT = NULL;
   code = doAsText(val, length, &outputWKT);
   if (code != TSDB_CODE_SUCCESS) {
-    shellPrintString(getThreadLocalGeosCtx()->errMsg, width);  //should NOT happen
+    shellPrintString(getThreadLocalGeosCtx()->errMsg, width);  // should NOT happen
     return;
   }
@@ -614,14 +610,13 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t
       break;
     case TSDB_DATA_TYPE_FLOAT:
       if (tsEnableScience) {
-        printf("%*.7e",width,GET_FLOAT_VAL(val));
+        printf("%*.7e", width, GET_FLOAT_VAL(val));
       } else {
         n = snprintf(buf, LENGTH, "%*.7f", width, GET_FLOAT_VAL(val));
         if (n > SHELL_FLOAT_WIDTH) {
-
-          printf("%*.7e", width,GET_FLOAT_VAL(val));
+          printf("%*.7e", width, GET_FLOAT_VAL(val));
         } else {
-          printf("%s", buf);
+          printf("%s", buf);
         }
       }
       break;
@@ -632,9 +627,9 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t
       } else {
         n = snprintf(buf, LENGTH, "%*.15f", width, GET_DOUBLE_VAL(val));
         if (n > SHELL_DOUBLE_WIDTH) {
-          printf("%*.15e", width, GET_DOUBLE_VAL(val));
+          printf("%*.15e", width, GET_DOUBLE_VAL(val));
         } else {
-          printf("%*s", width,buf);
+          printf("%*s", width, buf);
         }
       }
       break;
@@ -789,7 +784,7 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision) {
       if (field->bytes > shell.args.displayWidth) {
         return TMAX(shell.args.displayWidth, width);
       } else {
-        return TMAX(field->bytes, width);
+        return TMAX(field->bytes + 2, width);
       }
 
     case TSDB_DATA_TYPE_NCHAR:
@@ -798,7 +793,7 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision) {
       if (bytes > shell.args.displayWidth) {
         return TMAX(shell.args.displayWidth, width);
       } else {
-        return TMAX(bytes, width);
+        return TMAX(bytes + 2, width);
       }
     }
 
@@ -918,7 +913,7 @@ void shellReadHistory() {
   TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_READ | TD_FILE_STREAM);
   if (pFile == NULL) return;
 
-  char   *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1);
+  char   *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1);
   int32_t read_size = 0;
   while ((read_size = taosGetsFile(pFile, TSDB_MAX_ALLOWED_SQL_LEN, line)) != -1) {
     line[read_size - 1] = '\0';
@@ -935,8 +930,8 @@
   taosMemoryFreeClear(line);
   taosCloseFile(&pFile);
   int64_t file_size;
-  if (taosStatFile(pHistory->file, &file_size, NULL) == 0 && file_size > SHELL_MAX_COMMAND_SIZE) {
-    TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_TRUNC);
+  if (taosStatFile(pHistory->file, &file_size, NULL, NULL) == 0 && file_size > SHELL_MAX_COMMAND_SIZE) {
+    TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_TRUNC);
     if (pFile == NULL) return;
     int32_t endIndex = pHistory->hstart;
     if (endIndex != 0) {
@@ -958,7 +953,7 @@
 void shellWriteHistory() {
   SShellHistory *pHistory = &shell.history;
   if (pHistory->hend == pHistory->hstart) return;
-  TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_APPEND);
+  TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_APPEND);
   if (pFile == NULL) return;
 
   for (int32_t i = pHistory->hstart; i != pHistory->hend;) {
@@ -1004,7 +999,7 @@ void shellSourceFile(const char *file) {
     tstrncpy(fullname, file, PATH_MAX);
   }
 
-  sprintf(sourceFileCommand, "source %s;",fullname);
+  sprintf(sourceFileCommand, "source %s;", fullname);
   shellRecordCommandToHistory(sourceFileCommand);
 
   TdFilePtr pFile = taosOpenFile(fullname, TD_FILE_READ | TD_FILE_STREAM);
@@ -1014,7 +1009,7 @@ void shellSourceFile(const char *file) {
     return;
   }
 
-  char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1);
+  char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1);
   while ((read_len = taosGetsFile(pFile, TSDB_MAX_ALLOWED_SQL_LEN, line)) != -1) {
     if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
     line[--read_len] = '\0';
@@ -1057,7 +1052,8 @@ void shellGetGrantInfo() {
 
   int32_t code = taos_errno(tres);
   if (code != TSDB_CODE_SUCCESS) {
-    if (code != TSDB_CODE_OPS_NOT_SUPPORT && code != TSDB_CODE_MND_NO_RIGHTS && code != TSDB_CODE_PAR_PERMISSION_DENIED) {
+    if (code != TSDB_CODE_OPS_NOT_SUPPORT && code != TSDB_CODE_MND_NO_RIGHTS &&
+        code != TSDB_CODE_PAR_PERMISSION_DENIED) {
       fprintf(stderr, "Failed to check Server Edition, Reason:0x%04x:%s\r\n\r\n", code, taos_errstr(tres));
     }
     return;
@@ -1093,7 +1089,8 @@ void shellGetGrantInfo() {
     } else if (strcmp(expiretime, "unlimited") == 0) {
       fprintf(stdout, "Server is Enterprise %s Edition, %s and will never expire.\r\n", serverVersion, sinfo);
     } else {
-      fprintf(stdout, "Server is Enterprise %s Edition, %s and will expire at %s.\r\n", serverVersion, sinfo, expiretime);
+      fprintf(stdout, "Server is Enterprise %s Edition, %s and will expire at %s.\r\n", serverVersion, sinfo,
+              expiretime);
     }
 
     taos_free_result(tres);
@@ -1136,9 +1133,9 @@ void *shellCancelHandler(void *arg) {
 #ifdef WEBSOCKET
     }
 #endif
-  #ifdef WINDOWS
+#ifdef WINDOWS
     printf("\n%s", shell.info.promptHeader);
-  #endif
+#endif
   }
 
   return NULL;
@@ -1178,8 +1175,7 @@ void *shellThreadLoop(void *arg) {
 }
 
 int32_t shellExecute() {
-  printf(shell.info.clientVersion, shell.info.cusName,
-         taos_get_client_info(), shell.info.cusName);
+  printf(shell.info.clientVersion, shell.info.cusName, taos_get_client_info(), shell.info.cusName);
   fflush(stdout);
 
   SShellArgs *pArgs = &shell.args;
@@ -1246,13 +1242,13 @@ int32_t shellExecute() {
   taosSetSignal(SIGTERM, shellQueryInterruptHandler);
   taosSetSignal(SIGHUP, shellQueryInterruptHandler);
   taosSetSignal(SIGINT, shellQueryInterruptHandler);
-
+
 #ifdef WEBSOCKET
   if (!shell.args.restful && !shell.args.cloud) {
 #endif
 #ifndef WINDOWS
     printfIntroduction();
-#endif
+#endif
     shellGetGrantInfo();
 #ifdef WEBSOCKET
   }
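Two behavioral notes on the shellEngine.c hunks above, beyond the formatting churn: shellCalcColWidth now reserves two extra display columns for VARCHAR/NCHAR values (presumably so quoted or full-width content no longer collides with the column separator), and the reindented shellDumpFieldToFile block preserves the rule that every embedded double quote is doubled when a string field is dumped to a file, which is what keeps the quoted CSV-style output parseable. A minimal self-contained sketch of that escaping rule, using plain C types instead of the shell's TAOS_FIELD machinery; function and variable names here are illustrative, not from the patch:

#include <stdio.h>

// Copy `val` (length `len`) into `buf`, doubling every '"' so the result can
// be emitted between quotation marks in a CSV-style line.  `buf` must hold
// at least 2*len + 1 bytes in the worst case (all quotes), as in the shell
// code the caller is responsible for sizing it.
static void csvEscapeQuotes(const char *val, int len, char *buf) {
  int bufIndex = 0;
  for (int i = 0; i < len; i++) {
    buf[bufIndex++] = val[i];
    if (val[i] == '"') {
      buf[bufIndex++] = val[i];  // emit the quote twice
    }
  }
  buf[bufIndex] = 0;
}

int main() {
  char out[64];
  csvEscapeQuotes("say \"hi\"", 8, out);
  printf("\"%s\"\n", out);  // prints: "say ""hi"""
  return 0;
}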
diff --git a/utils/test/c/CMakeLists.txt b/utils/test/c/CMakeLists.txt
index 3f52fc8e5d..b96814c13b 100644
--- a/utils/test/c/CMakeLists.txt
+++ b/utils/test/c/CMakeLists.txt
@@ -31,7 +31,7 @@ target_link_libraries(
 )
 target_link_libraries(
     tmq_sim
-    PUBLIC taos
+    PUBLIC ${TAOS_LIB}
     PUBLIC util
     PUBLIC common
     PUBLIC os
diff --git a/utils/test/c/sml_test.c b/utils/test/c/sml_test.c
index e4ed6037a3..237bfc5092 100644
--- a/utils/test/c/sml_test.c
+++ b/utils/test/c/sml_test.c
@@ -1552,12 +1552,45 @@ int sml_ts3724_Test() {
   return code;
 }
 
+int sml_td24559_Test() {
+  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+  TAOS_RES *pRes = taos_query(taos, "drop database if exists td24559");
+  taos_free_result(pRes);
+
+  pRes = taos_query(taos, "create database if not exists td24559");
+  taos_free_result(pRes);
+
+  const char *sql[] = {
+      "stb,t1=1 f1=283i32,f2=g\"Point(4.343 89.342)\" 1632299372000",
+      "stb,t1=1 f2=G\"Point(4.343 89.342)\",f1=106i32 1632299373000",
+      "stb,t2=1 f2=G\"Point(4.343 89.342)\",f1=106i32 1632299374000",
+      "stb,t1=1 f1=106i32,f2=G\"GEOMETRYCOLLECTION (MULTIPOINT((0 0), (1 1)), POINT(3 4), LINESTRING(2 3, 3 4))\" 1632299378000",
+  };
+
+  pRes = taos_query(taos, "use td24559");
+  taos_free_result(pRes);
+
+  pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL,
+                                TSDB_SML_TIMESTAMP_MILLI_SECONDS);
+
+  int code = taos_errno(pRes);
+  printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes));
+  taos_free_result(pRes);
+
+  taos_close(taos);
+
+  return code;
+}
+
 int main(int argc, char *argv[]) {
   if (argc == 2) {
     taos_options(TSDB_OPTION_CONFIGDIR, argv[1]);
   }
 
   int ret = 0;
+  ret = sml_td24559_Test();
+  ASSERT(!ret);
   ret = sml_td24070_Test();
   ASSERT(!ret);
   ret = sml_td23881_Test();
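The new sml_td24559_Test above feeds GEOMETRY fields through schemaless line protocol: per its test data, a field literal written as g"..." or G"..." (either case is accepted) carries WKT text that the server stores in a GEOMETRY column. A hedged read-back sketch using the same client calls as the test; the expected row count of 4 mirrors the commented-out checks in sml.py earlier in this patch, and rendering the binary geometry values back into WKT is omitted (the shell does that via doAsText(), see shellEngine.c above):

/* Sketch only: assumes sml_td24559_Test() has already populated td24559. */
#include <stdio.h>
#include "taos.h"

int main() {
  TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (taos == NULL) return 1;

  // _ts and f2 are the column names the schemaless insert above creates:
  // _ts is the implicit timestamp, f2 the GEOMETRY field.
  TAOS_RES *res = taos_query(taos, "select _ts, f2 from td24559.stb order by _ts");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    int rows = 0;
    while (taos_fetch_row(res) != NULL) rows++;
    printf("fetched %d geometry row(s), expected 4\n", rows);
  }

  taos_free_result(res);
  taos_close(taos);
  return 0;
}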
diff --git a/utils/test/c/tmqDemo.c b/utils/test/c/tmqDemo.c
index ce069c2b05..64f536433e 100644
--- a/utils/test/c/tmqDemo.c
+++ b/utils/test/c/tmqDemo.c
@@ -221,7 +221,7 @@ int64_t getDirectorySize(char* dir) {
       totalSize += subDirSize;
     } else if (0 == strcmp(strchr(fileName, '.'), ".log")) {  // only calc .log file size, and not include .idx file
       int64_t file_size = 0;
-      taosStatFile(subdir, &file_size, NULL);
+      taosStatFile(subdir, &file_size, NULL, NULL);
       totalSize += file_size;
     }
   }
@@ -702,4 +702,3 @@ int main(int32_t argc, char* argv[]) {
   taosCloseFile(&g_fp);
   return 0;
 }
-
diff --git a/utils/tsim/CMakeLists.txt b/utils/tsim/CMakeLists.txt
index c2cf7ac3c5..209982c659 100644
--- a/utils/tsim/CMakeLists.txt
+++ b/utils/tsim/CMakeLists.txt
@@ -2,7 +2,7 @@ aux_source_directory(src TSIM_SRC)
 add_executable(tsim ${TSIM_SRC})
 target_link_libraries(
     tsim
-    PUBLIC taos_static
+    PUBLIC ${TAOS_LIB}
    PUBLIC util
     PUBLIC common
     PUBLIC os
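Both taosStatFile call sites touched in this patch (shellReadHistory in shellEngine.c and getDirectorySize in tmqDemo.c) pass NULL for the function's new fourth out-parameter, preserving the old size-only behavior. For a caller that does want the extra metadata, the shape would be roughly the following; note that the meanings assigned to the third and fourth out-parameters (taken here to be modification and access times) are assumptions, since the OS-layer header that declares taosStatFile is not part of this diff.

#include <inttypes.h>
#include <stdio.h>
#include "os.h"  // TDengine OS abstraction layer, declares taosStatFile()

int main() {
  int64_t size = 0;
  int32_t mtime = 0;  // assumed meaning of the 3rd out-parameter
  int32_t atime = 0;  // assumed meaning of the new 4th out-parameter

  // Callers that only need the size can pass NULL for the rest, exactly as
  // the updated call sites in shellEngine.c and tmqDemo.c do; 0 indicates
  // success, matching the `== 0` check in shellReadHistory().
  if (taosStatFile("/tmp/demo.log", &size, &mtime, &atime) == 0) {
    printf("size=%" PRId64 " mtime=%d atime=%d\n", size, mtime, atime);
  }
  return 0;
}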