From 88d755be76d3b75bcebf9b114363ce0b5b4dcae1 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 4 Aug 2023 13:31:00 +0800 Subject: [PATCH 01/78] feat(tsdb/cos): s3 migration --- cmake/cmake.options | 6 + cmake/cos_CMakeLists.txt.in | 12 + contrib/CMakeLists.txt | 23 + contrib/test/CMakeLists.txt | 5 + contrib/test/cos/CMakeLists.txt | 49 + contrib/test/cos/main.c | 3090 +++++++++++++++++++ source/common/src/tglobal.c | 68 +- source/dnode/vnode/CMakeLists.txt | 28 +- source/dnode/vnode/src/inc/vndCos.h | 36 + source/dnode/vnode/src/tsdb/tsdbRetention.c | 120 +- source/dnode/vnode/src/vnd/vnodeCos.c | 114 + source/dnode/vnode/src/vnd/vnodeModule.c | 5 + 12 files changed, 3530 insertions(+), 26 deletions(-) create mode 100644 cmake/cos_CMakeLists.txt.in create mode 100644 contrib/test/cos/CMakeLists.txt create mode 100644 contrib/test/cos/main.c create mode 100644 source/dnode/vnode/src/inc/vndCos.h create mode 100644 source/dnode/vnode/src/vnd/vnodeCos.c diff --git a/cmake/cmake.options b/cmake/cmake.options index fa0b888415..ea5efcb13a 100644 --- a/cmake/cmake.options +++ b/cmake/cmake.options @@ -125,6 +125,12 @@ option( ON ) +option( + BUILD_WITH_COS + "If build with cos" + ON +) + option( BUILD_WITH_SQLITE "If build with sqlite" diff --git a/cmake/cos_CMakeLists.txt.in b/cmake/cos_CMakeLists.txt.in new file mode 100644 index 0000000000..ee1e58b50f --- /dev/null +++ b/cmake/cos_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# cos +ExternalProject_Add(cos + GIT_REPOSITORY https://github.com/tencentyun/cos-c-sdk-v5.git + GIT_TAG v5.0.16 + SOURCE_DIR "${TD_CONTRIB_DIR}/cos-c-sdk-v5" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index c60fd33b16..df9519d00f 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -122,6 +122,12 @@ if(${BUILD_WITH_SQLITE}) cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) 
endif(${BUILD_WITH_SQLITE}) +# cos +if(${BUILD_WITH_COS}) + cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + add_definitions(-DUSE_COS) +endif(${BUILD_WITH_COS}) + # lucene if(${BUILD_WITH_LUCENE}) cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -347,6 +353,23 @@ if (${BUILD_WITH_ROCKSDB}) endif() endif() +# cos +if(${BUILD_WITH_COS}) + option(ENABLE_TEST "Enable the tests" OFF) + + set(CMAKE_BUILD_TYPE debug) + set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) + set(CMAKE_PROJECT_NAME cos_c_sdk) + + add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL) + target_include_directories( + cos_c_sdk + PUBLIC $ + ) + + set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME}) +endif(${BUILD_WITH_COS}) + # lucene # To support build on ubuntu: sudo apt-get install libboost-all-dev if(${BUILD_WITH_LUCENE}) diff --git a/contrib/test/CMakeLists.txt b/contrib/test/CMakeLists.txt index f35cf0d13d..1deff5a67e 100644 --- a/contrib/test/CMakeLists.txt +++ b/contrib/test/CMakeLists.txt @@ -3,6 +3,11 @@ if(${BUILD_WITH_ROCKSDB}) add_subdirectory(rocksdb) endif(${BUILD_WITH_ROCKSDB}) +# cos +if(${BUILD_WITH_COS}) + add_subdirectory(cos) +endif(${BUILD_WITH_COS}) + if(${BUILD_WITH_LUCENE}) add_subdirectory(lucene) endif(${BUILD_WITH_LUCENE}) diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt new file mode 100644 index 0000000000..77c57e5a65 --- /dev/null +++ b/contrib/test/cos/CMakeLists.txt @@ -0,0 +1,49 @@ +add_executable(cosTest "") +target_sources(cosTest + PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}/main.c" + ) + +#find_path(APR_INCLUDE_DIR apr-1/apr_time.h) +#find_path(APR_UTIL_INCLUDE_DIR apr/include/apr-1/apr_md5.h) +#find_path(MINIXML_INCLUDE_DIR mxml.h) +#find_path(CURL_INCLUDE_DIR curl/curl.h) + +#include_directories (${MINIXML_INCLUDE_DIR}) +#include_directories (${CURL_INCLUDE_DIR}) +FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) 
+#FIND_PROGRAM(APU_CONFIG_BIN NAMES apu-config apu-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) + +IF (APR_CONFIG_BIN) + EXECUTE_PROCESS( + COMMAND ${APR_CONFIG_BIN} --includedir + OUTPUT_VARIABLE APR_INCLUDE_DIR + OUTPUT_STRIP_TRAILING_WHITESPACE + ) +ENDIF() +#IF (APU_CONFIG_BIN) +# EXECUTE_PROCESS( +# COMMAND ${APU_CONFIG_BIN} --includedir +# OUTPUT_VARIABLE APR_UTIL_INCLUDE_DIR +# OUTPUT_STRIP_TRAILING_WHITESPACE +# ) +#ENDIF() + +include_directories (${APR_INCLUDE_DIR}) +#include_directories (${APR_UTIL_INCLUDE_DIR}) + +target_include_directories( + cosTest + PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" + ) + +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) + +target_link_libraries(cosTest cos_c_sdk) +target_link_libraries(cosTest ${APR_UTIL_LIBRARY}) +target_link_libraries(cosTest ${APR_LIBRARY}) +target_link_libraries(cosTest ${MINIXML_LIBRARY}) +target_link_libraries(cosTest ${CURL_LIBRARY}) diff --git a/contrib/test/cos/main.c b/contrib/test/cos/main.c new file mode 100644 index 0000000000..faaceee2e3 --- /dev/null +++ b/contrib/test/cos/main.c @@ -0,0 +1,3090 @@ +#include +#include +#include +#include + +#include "cos_api.h" +#include "cos_http_io.h" +#include "cos_log.h" + +// endpoint 是 COS 访问域名信息,详情请参见 https://cloud.tencent.com/document/product/436/6224 文档 +// static char TEST_COS_ENDPOINT[] = "cos.ap-guangzhou.myqcloud.com"; +// static char TEST_COS_ENDPOINT[] = "http://oss-cn-beijing.aliyuncs.com"; +static char TEST_COS_ENDPOINT[] = "http://cos.ap-beijing.myqcloud.com"; +// 数据万象的访问域名,详情请参见 https://cloud.tencent.com/document/product/460/31066 文档 +static char TEST_CI_ENDPOINT[] = "https://ci.ap-guangzhou.myqcloud.com"; +// 开发者拥有的项目身份ID/密钥,可在 https://console.cloud.tencent.com/cam/capi 页面获取 +static char *TEST_ACCESS_KEY_ID; // your secret_id +static char *TEST_ACCESS_KEY_SECRET; // 
your secret_key +// 开发者访问 COS 服务时拥有的用户维度唯一资源标识,用以标识资源,可在 https://console.cloud.tencent.com/cam/capi +// 页面获取 +// static char TEST_APPID[] = ""; // your appid +// static char TEST_APPID[] = "119"; // your appid +static char TEST_APPID[] = "1309024725"; // your appid +// the cos bucket name, syntax: [bucket]-[appid], for example: mybucket-1253666666,可在 +// https://console.cloud.tencent.com/cos5/bucket 查看 static char TEST_BUCKET_NAME[] = ""; +// static char TEST_BUCKET_NAME[] = ""; +// static char TEST_BUCKET_NAME[] = "test-bucket-119"; +static char TEST_BUCKET_NAME[] = "test0711-1309024725"; +// 对象拥有者,比如用户UIN:100000000001 +static char TEST_UIN[] = ""; // your uin +// 地域信息,枚举值可参见 https://cloud.tencent.com/document/product/436/6224 +// 文档,例如:ap-beijing、ap-hongkong、eu-frankfurt 等 +static char TEST_REGION[] = "ap-guangzhou"; // region in endpoint +// 对象键,对象(Object)在存储桶(Bucket)中的唯一标识。有关对象与对象键的进一步说明,请参见 +// https://cloud.tencent.com/document/product/436/13324 文档 +static char TEST_OBJECT_NAME1[] = "1.txt"; +static char TEST_OBJECT_NAME2[] = "test2.dat"; +static char TEST_OBJECT_NAME3[] = "test3.dat"; +static char TEST_OBJECT_NAME4[] = "multipart.txt"; +// static char TEST_DOWNLOAD_NAME2[] = "download_test2.dat"; +static char *TEST_APPEND_NAMES[] = {"test.7z.001", "test.7z.002"}; +static char TEST_DOWNLOAD_NAME3[] = "download_test3.dat"; +static char TEST_MULTIPART_OBJECT[] = "multipart.dat"; +static char TEST_DOWNLOAD_NAME4[] = "multipart_download.dat"; +static char TEST_MULTIPART_FILE[] = "test.zip"; +// static char TEST_MULTIPART_OBJECT2[] = "multipart2.dat"; +static char TEST_MULTIPART_OBJECT3[] = "multipart3.dat"; +static char TEST_MULTIPART_OBJECT4[] = "multipart4.dat"; + +static void print_headers(cos_table_t *headers) { + const cos_array_header_t *tarr; + const cos_table_entry_t *telts; + int i = 0; + + if (apr_is_empty_table(headers)) { + return; + } + + tarr = cos_table_elts(headers); + telts = (cos_table_entry_t *)tarr->elts; + + printf("headers:\n"); + for (; i < 
tarr->nelts; i++) { + telts = (cos_table_entry_t *)(tarr->elts + i * tarr->elt_size); + printf("%s: %s\n", telts->key, telts->val); + } +} + +void init_test_config(cos_config_t *config, int is_cname) { + cos_str_set(&config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&config->appid, TEST_APPID); + config->is_cname = is_cname; +} + +void init_test_request_options(cos_request_options_t *options, int is_cname) { + options->config = cos_config_create(options->pool); + init_test_config(options->config, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); +} + +void log_status(cos_status_t *s) { + cos_warn_log("status->code: %d", s->code); + if (s->error_code) cos_warn_log("status->error_code: %s", s->error_code); + if (s->error_msg) cos_warn_log("status->error_msg: %s", s->error_msg); + if (s->req_id) cos_warn_log("status->req_id: %s", s->req_id); +} + +void test_sign() { + cos_pool_t *p = NULL; + const unsigned char secret_key[] = "your secret_key"; + const unsigned char time_str[] = "1480932292;1481012292"; + unsigned char sign_key[40]; + cos_buf_t *fmt_str; + const char *value = NULL; + const char *uri = "/testfile"; + const char *host = "testbucket-125000000.cn-north.myqcloud.com&range=bytes%3d0-3"; + unsigned char fmt_str_hex[40]; + + cos_pool_create(&p, NULL); + fmt_str = cos_create_buf(p, 1024); + + cos_get_hmac_sha1_hexdigest(sign_key, secret_key, sizeof(secret_key) - 1, time_str, sizeof(time_str) - 1); + char *pstr = apr_pstrndup(p, (char *)sign_key, sizeof(sign_key)); + cos_warn_log("sign_key: %s", pstr); + + // method + value = "get"; + cos_buf_append_string(p, fmt_str, value, strlen(value)); + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + // canonicalized resource(URI) + cos_buf_append_string(p, fmt_str, uri, strlen(uri)); + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + // 
query-parameters + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + // Host + cos_buf_append_string(p, fmt_str, "host=", sizeof("host=") - 1); + cos_buf_append_string(p, fmt_str, host, strlen(host)); + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + char *pstr3 = apr_pstrndup(p, (char *)fmt_str->pos, cos_buf_size(fmt_str)); + cos_warn_log("Format string: %s", pstr3); + + // Format-String sha1hash + cos_get_sha1_hexdigest(fmt_str_hex, (unsigned char *)fmt_str->pos, cos_buf_size(fmt_str)); + + char *pstr2 = apr_pstrndup(p, (char *)fmt_str_hex, sizeof(fmt_str_hex)); + cos_warn_log("Format string sha1hash: %s", pstr2); + + cos_pool_destroy(p); +} + +void test_bucket() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_acl_e cos_acl = COS_ACL_PRIVATE; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // create test bucket + s = cos_create_bucket(options, &bucket, cos_acl, &resp_headers); + log_status(s); + + // list object (get bucket) + cos_list_object_params_t *list_params = NULL; + list_params = cos_create_list_object_params(p); + cos_str_set(&list_params->encoding_type, "url"); + s = cos_list_object(options, &bucket, list_params, &resp_headers); + log_status(s); + cos_list_object_content_t *content = NULL; + char *line = NULL; + cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) { + line = apr_psprintf(p, "%.*s\t%.*s\t%.*s\n", content->key.len, content->key.data, content->size.len, + content->size.data, content->last_modified.len, content->last_modified.data); + printf("%s", line); + printf("next marker: %s\n", list_params->next_marker.data); + } + cos_list_object_common_prefix_t *common_prefix = NULL; + 
cos_list_for_each_entry(cos_list_object_common_prefix_t, common_prefix, &list_params->common_prefix_list, node) { + printf("common prefix: %s\n", common_prefix->prefix.data); + } + + // delete bucket + s = cos_delete_bucket(options, &bucket, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_list_objects() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //获取对象列表 + cos_list_object_params_t *list_params = NULL; + cos_list_object_content_t *content = NULL; + list_params = cos_create_list_object_params(p); + s = cos_list_object(options, &bucket, list_params, &resp_headers); + if (cos_status_is_ok(s)) { + printf("list object succeeded\n"); + cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) { + printf("object: %.*s\n", content->key.len, content->key.data); + } + } else { + printf("list object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_bucket_lifecycle() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + cos_list_t rule_list; + cos_list_init(&rule_list); + cos_lifecycle_rule_content_t *rule_content = NULL; + + rule_content = cos_create_lifecycle_rule_content(p); + cos_str_set(&rule_content->id, "testrule1"); + cos_str_set(&rule_content->prefix, "abc/"); + cos_str_set(&rule_content->status, "Enabled"); + rule_content->expire.days = 365; + cos_list_add_tail(&rule_content->node, &rule_list); 
+ + rule_content = cos_create_lifecycle_rule_content(p); + cos_str_set(&rule_content->id, "testrule2"); + cos_str_set(&rule_content->prefix, "efg/"); + cos_str_set(&rule_content->status, "Disabled"); + cos_str_set(&rule_content->transition.storage_class, "Standard_IA"); + rule_content->transition.days = 999; + cos_list_add_tail(&rule_content->node, &rule_list); + + rule_content = cos_create_lifecycle_rule_content(p); + cos_str_set(&rule_content->id, "testrule3"); + cos_str_set(&rule_content->prefix, "xxx/"); + cos_str_set(&rule_content->status, "Enabled"); + rule_content->abort.days = 1; + cos_list_add_tail(&rule_content->node, &rule_list); + + s = cos_put_bucket_lifecycle(options, &bucket, &rule_list, &resp_headers); + log_status(s); + + cos_list_t rule_list_ret; + cos_list_init(&rule_list_ret); + s = cos_get_bucket_lifecycle(options, &bucket, &rule_list_ret, &resp_headers); + log_status(s); + + cos_delete_bucket_lifecycle(options, &bucket, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_put_object_with_limit() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers = NULL; + cos_table_t *headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + + //上传对象 + cos_str_set(&file, "test_file.bin"); + cos_str_set(&object, TEST_OBJECT_NAME1); + s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers); + if (cos_status_is_ok(s)) { + printf("put object succeeded\n"); + } else { + printf("put object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void 
test_get_object_with_limit() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers = NULL; + cos_table_t *headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + + //下载对象 + cos_str_set(&file, "test_file.bin"); + cos_str_set(&object, TEST_OBJECT_NAME1); + s = cos_get_object_to_file(options, &bucket, &object, headers, NULL, &file, &resp_headers); + if (cos_status_is_ok(s)) { + printf("get object succeeded\n"); + } else { + printf("get object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_gen_object_url() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + printf("url:%s\n", cos_gen_object_url(options, &bucket, &object)); + + cos_pool_destroy(p); +} + +void test_create_dir() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_list_t buffer; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "folder/"); + + //上传文件夹 + cos_list_init(&buffer); + s = cos_put_object_from_buffer(options, &bucket, &object, 
&buffer, headers, &resp_headers); + if (cos_status_is_ok(s)) { + printf("put object succeeded\n"); + } else { + printf("put object failed\n"); + } + cos_pool_destroy(p); +} + +void test_object() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_list_t buffer; + cos_buf_t *content = NULL; + char *str = "This is my test data."; + cos_string_t file; + int traffic_limit = 0; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + cos_list_init(&buffer); + content = cos_buf_pack(options->pool, str, strlen(str)); + cos_list_add_tail(&content->node, &buffer); + s = cos_put_object_from_buffer(options, &bucket, &object, &buffer, headers, &resp_headers); + log_status(s); + + cos_list_t download_buffer; + cos_list_init(&download_buffer); + if (traffic_limit) { + // 限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + } + s = cos_get_object_to_buffer(options, &bucket, &object, headers, NULL, &download_buffer, &resp_headers); + log_status(s); + print_headers(resp_headers); + int64_t len = 0; + int64_t size = 0; + int64_t pos = 0; + cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { len += cos_buf_size(content); } + char *buf = cos_pcalloc(p, (apr_size_t)(len + 1)); + buf[len] = '\0'; + cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { + size = cos_buf_size(content); + memcpy(buf + pos, content->pos, (size_t)size); + pos += size; + } + cos_warn_log("Download data=%s", buf); + + cos_str_set(&file, TEST_OBJECT_NAME4); + cos_str_set(&object, TEST_OBJECT_NAME4); + s = cos_put_object_from_file(options, &bucket, 
&object, &file, NULL, &resp_headers); + log_status(s); + + cos_str_set(&file, TEST_DOWNLOAD_NAME3); + cos_str_set(&object, TEST_OBJECT_NAME3); + s = cos_get_object_to_file(options, &bucket, &object, NULL, NULL, &file, &resp_headers); + log_status(s); + + cos_str_set(&object, TEST_OBJECT_NAME2); + s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); + log_status(s); + + cos_str_set(&object, TEST_OBJECT_NAME1); + s = cos_delete_object(options, &bucket, &object, &resp_headers); + log_status(s); + + cos_str_set(&object, TEST_OBJECT_NAME3); + s = cos_delete_object(options, &bucket, &object, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_append_object() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //追加上传对象 + cos_str_set(&object, TEST_OBJECT_NAME3); + int32_t count = sizeof(TEST_APPEND_NAMES) / sizeof(char *); + int32_t index = 0; + int64_t position = 0; + s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); + if (s->code == 200) { + char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH); + if (content_length_str != NULL) { + position = atol(content_length_str); + } + } + for (; index < count; index++) { + cos_str_set(&file, TEST_APPEND_NAMES[index]); + s = cos_append_object_from_file(options, &bucket, &object, position, &file, NULL, &resp_headers); + log_status(s); + + s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); + if (s->code == 200) { + char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH); + if (content_length_str != NULL) { + position = atol(content_length_str); + } + } + } 
+ + //销毁内存池 + cos_pool_destroy(p); +} + +void test_head_object() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //获取对象元数据 + cos_str_set(&object, TEST_OBJECT_NAME1); + s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); + print_headers(resp_headers); + if (cos_status_is_ok(s)) { + printf("head object succeeded\n"); + } else { + printf("head object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_check_object_exist() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_object_exist_status_e object_exist; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + // 检查对象是否存在 + s = cos_check_object_exist(options, &bucket, &object, headers, &object_exist, &resp_headers); + if (object_exist == COS_OBJECT_NON_EXIST) { + printf("object: %.*s non exist.\n", object.len, object.data); + } else if (object_exist == COS_OBJECT_EXIST) { + printf("object: %.*s exist.\n", object.len, object.data); + } else { + printf("object: %.*s unknown status.\n", object.len, object.data); + log_status(s); + } + + cos_pool_destroy(p); +} + +void test_object_restore() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_status_t *s = NULL; + + cos_pool_create(&p, NULL); + options = 
cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test_restore.dat"); + + cos_object_restore_params_t *restore_params = cos_create_object_restore_params(p); + restore_params->days = 30; + cos_str_set(&restore_params->tier, "Standard"); + s = cos_post_object_restore(options, &bucket, &object, restore_params, NULL, NULL, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void progress_callback(int64_t consumed_bytes, int64_t total_bytes) { + printf("consumed_bytes = %" APR_INT64_T_FMT ", total_bytes = %" APR_INT64_T_FMT "\n", consumed_bytes, total_bytes); +} + +void test_put_object_from_file() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_string_t file; + int traffic_limit = 0; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_table_t *headers = NULL; + if (traffic_limit) { + // 限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + } + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&file, TEST_OBJECT_NAME4); + cos_str_set(&object, TEST_OBJECT_NAME4); + s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_put_object_from_file_with_sse() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_string_t file; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_table_t *headers = NULL; + headers = cos_table_make(p, 3); + // 
apr_table_add(headers, "x-cos-server-side-encryption", "AES256"); + apr_table_add(headers, "x-cos-server-side-encryption-customer-algorithm", "AES256"); + apr_table_add(headers, "x-cos-server-side-encryption-customer-key", "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY="); + apr_table_add(headers, "x-cos-server-side-encryption-customer-key-MD5", "U5L61r7jcwdNvT7frmUG8g=="); + + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&file, "/home/jojoliang/data/test.jpg"); + cos_str_set(&object, "pic"); + + s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers); + log_status(s); + { + int i = 0; + apr_array_header_t *pp = (apr_array_header_t *)apr_table_elts(resp_headers); + for (; i < pp->nelts; i++) { + apr_table_entry_t *ele = (apr_table_entry_t *)pp->elts + i; + printf("%s: %s\n", ele->key, ele->val); + } + } + + cos_pool_destroy(p); +} + +void test_get_object_to_file_with_sse() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_string_t file; + cos_table_t *headers = NULL; + cos_table_t *params = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 3); + /* + apr_table_add(headers, "x-cos-server-side-encryption", "AES256"); + */ + /* + apr_table_add(headers, "x-cos-server-side-encryption-customer-algorithm", "AES256"); + apr_table_add(headers, "x-cos-server-side-encryption-customer-key", + "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY="); apr_table_add(headers, + "x-cos-server-side-encryption-customer-key-MD5", "U5L61r7jcwdNvT7frmUG8g=="); + */ + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&file, "getfile"); + cos_str_set(&object, TEST_OBJECT_NAME1); + + s = cos_get_object_to_file(options, &bucket, &object, headers, params, &file, &resp_headers); + log_status(s); + + { + int i = 
0; + apr_array_header_t *pp = (apr_array_header_t *)apr_table_elts(resp_headers); + for (; i < pp->nelts; i++) { + apr_table_entry_t *ele = (apr_table_entry_t *)pp->elts + i; + printf("%s: %s\n", ele->key, ele->val); + } + } + + cos_pool_destroy(p); +} + +void multipart_upload_file_from_file() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *complete_headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_string_t upload_id; + cos_upload_file_t *upload_file = NULL; + cos_status_t *s = NULL; + cos_list_upload_part_params_t *params = NULL; + cos_list_t complete_part_list; + cos_list_part_content_t *part_content = NULL; + cos_complete_part_content_t *complete_part_content = NULL; + int part_num = 1; + int64_t pos = 0; + int64_t file_length = 0; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 1); + complete_headers = cos_table_make(p, 1); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT); + + // init mulitipart + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, headers, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Init multipart upload succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Init multipart upload failed\n"); + cos_pool_destroy(p); + return; + } + + // upload part from file + int res = COSE_OK; + cos_file_buf_t *fb = cos_create_file_buf(p); + res = cos_open_file_for_all_read(p, TEST_MULTIPART_FILE, fb); + if (res != COSE_OK) { + cos_error_log("Open read file fail, filename:%s\n", TEST_MULTIPART_FILE); + return; + } + file_length = fb->file_last; + apr_file_close(fb->file); + while (pos < file_length) { + upload_file = cos_create_upload_file(p); + cos_str_set(&upload_file->filename, TEST_MULTIPART_FILE); + upload_file->file_pos 
= pos; + pos += 2 * 1024 * 1024; + upload_file->file_last = pos < file_length ? pos : file_length; // 2MB + s = cos_upload_part_from_file(options, &bucket, &object, &upload_id, part_num++, upload_file, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Multipart upload part from file succeeded\n"); + } else { + printf("Multipart upload part from file failed\n"); + } + } + + // list part + params = cos_create_list_upload_part_params(p); + params->max_ret = 1000; + cos_list_init(&complete_part_list); + s = cos_list_upload_part(options, &bucket, &object, &upload_id, params, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("List multipart succeeded\n"); + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n", part_content->part_number.data, + part_content->size.data, part_content->last_modified.data, part_content->etag.data); + } + } else { + printf("List multipart failed\n"); + cos_pool_destroy(p); + return; + } + + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + complete_part_content = cos_create_complete_part_content(p); + cos_str_set(&complete_part_content->part_number, part_content->part_number.data); + cos_str_set(&complete_part_content->etag, part_content->etag.data); + cos_list_add_tail(&complete_part_content->node, &complete_part_list); + } + + // complete multipart + s = cos_complete_multipart_upload(options, &bucket, &object, &upload_id, &complete_part_list, complete_headers, + &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Complete multipart upload from file succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Complete multipart upload from file failed\n"); + } + + cos_pool_destroy(p); +} + +void multipart_upload_file_from_buffer() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *headers = NULL; + 
cos_table_t *complete_headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_string_t upload_id; + cos_status_t *s = NULL; + cos_list_t complete_part_list; + cos_complete_part_content_t *complete_part_content = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 1); + complete_headers = cos_table_make(p, 1); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT); + + // init mulitipart + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, headers, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Init multipart upload succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Init multipart upload failed\n"); + cos_pool_destroy(p); + return; + } + + // upload part from buffer + char *str = "This is my test data...."; + cos_list_t buffer; + cos_buf_t *content; + + // 上传一个分块 + cos_list_init(&buffer); + content = cos_buf_pack(p, str, strlen(str)); + cos_list_add_tail(&content->node, &buffer); + s = cos_upload_part_from_buffer(options, &bucket, &object, &upload_id, 1, &buffer, &resp_headers); + + // 直接获取etag + char *etag = apr_pstrdup(p, (char *)apr_table_get(resp_headers, "ETag")); + cos_list_init(&complete_part_list); + complete_part_content = cos_create_complete_part_content(p); + cos_str_set(&complete_part_content->part_number, "1"); + cos_str_set(&complete_part_content->etag, etag); + cos_list_add_tail(&complete_part_content->node, &complete_part_list); + + // 也可以通过 list part 获取取etag + /* + //list part + params = cos_create_list_upload_part_params(p); + params->max_ret = 1000; + cos_list_init(&complete_part_list); + s = cos_list_upload_part(options, &bucket, &object, &upload_id, + params, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("List multipart succeeded\n"); + cos_list_for_each_entry(cos_list_part_content_t, part_content, 
¶ms->part_list, node) { + printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n", + part_content->part_number.data, + part_content->size.data, + part_content->last_modified.data, + part_content->etag.data); + } + } else { + printf("List multipart failed\n"); + cos_pool_destroy(p); + return; + } + + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + complete_part_content = cos_create_complete_part_content(p); + cos_str_set(&complete_part_content->part_number, part_content->part_number.data); + cos_str_set(&complete_part_content->etag, part_content->etag.data); + cos_list_add_tail(&complete_part_content->node, &complete_part_list); + } + */ + + // complete multipart + s = cos_complete_multipart_upload(options, &bucket, &object, &upload_id, &complete_part_list, complete_headers, + &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Complete multipart upload from file succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Complete multipart upload from file failed\n"); + } + + cos_pool_destroy(p); +} + +void abort_multipart_upload() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_string_t upload_id; + cos_status_t *s = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 1); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT); + + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, headers, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Init multipart upload succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Init multipart upload failed\n"); + cos_pool_destroy(p); + return; + } + + s = cos_abort_multipart_upload(options, &bucket, 
&object, &upload_id, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Abort multipart upload succeeded, upload_id::%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Abort multipart upload failed\n"); + } + + cos_pool_destroy(p); +} + +void list_multipart() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_status_t *s = NULL; + cos_list_multipart_upload_params_t *list_multipart_params = NULL; + cos_list_upload_part_params_t *list_upload_param = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + list_multipart_params = cos_create_list_multipart_upload_params(p); + list_multipart_params->max_ret = 999; + s = cos_list_multipart_upload(options, &bucket, list_multipart_params, &resp_headers); + log_status(s); + + list_upload_param = cos_create_list_upload_part_params(p); + list_upload_param->max_ret = 1000; + cos_string_t upload_id; + cos_str_set(&upload_id, "149373379126aee264fecbf5fe8ddb8b9cd23b76c73ab1af0bcfd50683cc4254f81ebe2386"); + cos_str_set(&object, TEST_MULTIPART_OBJECT); + s = cos_list_upload_part(options, &bucket, &object, &upload_id, list_upload_param, &resp_headers); + if (cos_status_is_ok(s)) { + printf("List upload part succeeded, upload_id::%.*s\n", upload_id.len, upload_id.data); + cos_list_part_content_t *part_content = NULL; + cos_list_for_each_entry(cos_list_part_content_t, part_content, &list_upload_param->part_list, node) { + printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n", part_content->part_number.data, + part_content->size.data, part_content->last_modified.data, part_content->etag.data); + } + } else { + printf("List upload part failed\n"); + } + + cos_pool_destroy(p); +} + +void test_resumable() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = 
NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t filepath; + cos_resumable_clt_params_t *clt_params; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT4); + cos_str_set(&filepath, TEST_DOWNLOAD_NAME4); + + clt_params = cos_create_resumable_clt_params_content(p, 5 * 1024 * 1024, 3, COS_FALSE, NULL); + s = cos_resumable_download_file(options, &bucket, &object, &filepath, NULL, NULL, clt_params, NULL); + log_status(s); + + cos_pool_destroy(p); +} + +void test_resumable_upload_with_multi_threads() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t filename; + cos_status_t *s = NULL; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_resumable_clt_params_t *clt_params; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT4); + cos_str_set(&filename, TEST_MULTIPART_FILE); + + // upload + clt_params = cos_create_resumable_clt_params_content(p, 1024 * 1024, 8, COS_FALSE, NULL); + s = cos_resumable_upload_file(options, &bucket, &object, &filename, headers, NULL, clt_params, NULL, &resp_headers, + NULL); + + if (cos_status_is_ok(s)) { + printf("upload succeeded\n"); + } else { + printf("upload failed\n"); + } + + cos_pool_destroy(p); +} + +void test_delete_objects() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_string_t bucket; + cos_status_t *s = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + char *object_name1 = TEST_OBJECT_NAME2; + char *object_name2 = TEST_OBJECT_NAME3; + cos_object_key_t *content1 = NULL; + 
cos_object_key_t *content2 = NULL; + cos_list_t object_list; + cos_list_t deleted_object_list; + int is_quiet = COS_TRUE; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + cos_list_init(&object_list); + cos_list_init(&deleted_object_list); + content1 = cos_create_cos_object_key(p); + cos_str_set(&content1->key, object_name1); + cos_list_add_tail(&content1->node, &object_list); + content2 = cos_create_cos_object_key(p); + cos_str_set(&content2->key, object_name2); + cos_list_add_tail(&content2->node, &object_list); + + s = cos_delete_objects(options, &bucket, &object_list, is_quiet, &resp_headers, &deleted_object_list); + log_status(s); + + cos_pool_destroy(p); + + if (cos_status_is_ok(s)) { + printf("delete objects succeeded\n"); + } else { + printf("delete objects failed\n"); + } +} + +void test_delete_objects_by_prefix() { + cos_pool_t *p = NULL; + cos_request_options_t *options = NULL; + int is_cname = 0; + cos_string_t bucket; + cos_status_t *s = NULL; + cos_string_t prefix; + char *prefix_str = ""; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&prefix, prefix_str); + + s = cos_delete_objects_by_prefix(options, &bucket, &prefix); + log_status(s); + cos_pool_destroy(p); + + printf("test_delete_object_by_prefix ok\n"); +} + +void test_acl() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_acl_e cos_acl = COS_ACL_PRIVATE; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.txt"); + + // put acl + cos_string_t read; + 
cos_str_set(&read, "id=\"qcs::cam::uin/12345:uin/12345\", id=\"qcs::cam::uin/45678:uin/45678\""); + s = cos_put_bucket_acl(options, &bucket, cos_acl, &read, NULL, NULL, &resp_headers); + log_status(s); + + // get acl + cos_acl_params_t *acl_params = NULL; + acl_params = cos_create_acl_params(p); + s = cos_get_bucket_acl(options, &bucket, acl_params, &resp_headers); + log_status(s); + printf("acl owner id:%s, name:%s\n", acl_params->owner_id.data, acl_params->owner_name.data); + cos_acl_grantee_content_t *acl_content = NULL; + cos_list_for_each_entry(cos_acl_grantee_content_t, acl_content, &acl_params->grantee_list, node) { + printf("acl grantee type:%s, id:%s, name:%s, permission:%s\n", acl_content->type.data, acl_content->id.data, + acl_content->name.data, acl_content->permission.data); + } + + // put acl + s = cos_put_object_acl(options, &bucket, &object, cos_acl, &read, NULL, NULL, &resp_headers); + log_status(s); + + // get acl + cos_acl_params_t *acl_params2 = NULL; + acl_params2 = cos_create_acl_params(p); + s = cos_get_object_acl(options, &bucket, &object, acl_params2, &resp_headers); + log_status(s); + printf("acl owner id:%s, name:%s\n", acl_params2->owner_id.data, acl_params2->owner_name.data); + acl_content = NULL; + cos_list_for_each_entry(cos_acl_grantee_content_t, acl_content, &acl_params2->grantee_list, node) { + printf("acl grantee id:%s, name:%s, permission:%s\n", acl_content->id.data, acl_content->name.data, + acl_content->permission.data); + } + + cos_pool_destroy(p); +} + +void test_copy() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t src_bucket; + cos_string_t src_object; + cos_string_t src_endpoint; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, 
TEST_BUCKET_NAME); + + //设置对象复制 + cos_str_set(&object, TEST_OBJECT_NAME2); + cos_str_set(&src_bucket, TEST_BUCKET_NAME); + cos_str_set(&src_endpoint, TEST_COS_ENDPOINT); + cos_str_set(&src_object, TEST_OBJECT_NAME1); + + cos_copy_object_params_t *params = NULL; + params = cos_create_copy_object_params(p); + s = cos_copy_object(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object, NULL, params, &resp_headers); + if (cos_status_is_ok(s)) { + printf("put object copy succeeded\n"); + } else { + printf("put object copy failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_modify_storage_class() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t src_bucket; + cos_string_t src_object; + cos_string_t src_endpoint; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + cos_str_set(&src_bucket, TEST_BUCKET_NAME); + cos_str_set(&src_endpoint, TEST_COS_ENDPOINT); + cos_str_set(&src_object, TEST_OBJECT_NAME1); + + // 设置x-cos-metadata-directive和x-cos-storage-class头域(替换为自己要更改的存储类型) + cos_table_t *headers = cos_table_make(p, 2); + apr_table_add(headers, "x-cos-metadata-directive", "Replaced"); + // 存储类型包括NTELLIGENT_TIERING,MAZ_INTELLIGENT_TIERING,STANDARD_IA,ARCHIVE,DEEP_ARCHIVE + apr_table_add(headers, "x-cos-storage-class", "ARCHIVE"); + + cos_copy_object_params_t *params = NULL; + params = cos_create_copy_object_params(p); + s = cos_copy_object(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object, headers, params, + &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_copy_mt() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t 
bucket; + cos_string_t object; + cos_string_t src_bucket; + cos_string_t src_object; + cos_string_t src_endpoint; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test_copy.txt"); + cos_str_set(&src_bucket, "mybucket-1253685564"); + cos_str_set(&src_endpoint, "cn-south.myqcloud.com"); + cos_str_set(&src_object, "test.txt"); + + s = cos_upload_object_by_part_copy_mt(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object, 1024 * 1024, + 8, NULL); + log_status(s); + + cos_pool_destroy(p); +} + +void test_copy_with_part_copy() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t copy_source; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test_copy.txt"); + cos_str_set(©_source, "mybucket-1253685564.cn-south.myqcloud.com/test.txt"); + + s = cos_upload_object_by_part_copy(options, ©_source, &bucket, &object, 1024 * 1024); + log_status(s); + + cos_pool_destroy(p); +} + +void make_rand_string(cos_pool_t *p, int len, cos_string_t *data) { + char *str = NULL; + int i = 0; + str = (char *)cos_palloc(p, len + 1); + for (; i < len; i++) { + str[i] = 'a' + rand() % 32; + } + str[len] = '\0'; + cos_str_set(data, str); +} + +unsigned long get_file_size(const char *file_path) { + unsigned long filesize = -1; + struct stat statbuff; + + if (stat(file_path, &statbuff) < 0) { + return filesize; + } else { + filesize = statbuff.st_size; + } + + return filesize; +} + +void test_part_copy() { + cos_pool_t *p = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + int is_cname = 0; + cos_string_t upload_id; + 
cos_list_upload_part_params_t *list_upload_part_params = NULL; + cos_upload_part_copy_params_t *upload_part_copy_params1 = NULL; + cos_upload_part_copy_params_t *upload_part_copy_params2 = NULL; + cos_table_t *headers = NULL; + cos_table_t *query_params = NULL; + cos_table_t *resp_headers = NULL; + cos_table_t *list_part_resp_headers = NULL; + cos_list_t complete_part_list; + cos_list_part_content_t *part_content = NULL; + cos_complete_part_content_t *complete_content = NULL; + cos_table_t *complete_resp_headers = NULL; + cos_status_t *s = NULL; + int part1 = 1; + int part2 = 2; + char *local_filename = "test_upload_part_copy.file"; + char *download_filename = "test_upload_part_copy.file.download"; + char *source_object_name = "cos_test_upload_part_copy_source_object"; + char *dest_object_name = "cos_test_upload_part_copy_dest_object"; + FILE *fd = NULL; + cos_string_t download_file; + cos_string_t dest_bucket; + cos_string_t dest_object; + int64_t range_start1 = 0; + int64_t range_end1 = 6000000; + int64_t range_start2 = 6000001; + int64_t range_end2; + cos_string_t data; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + + // create multipart upload local file + make_rand_string(p, 10 * 1024 * 1024, &data); + fd = fopen(local_filename, "w"); + fwrite(data.data, sizeof(data.data[0]), data.len, fd); + fclose(fd); + + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, source_object_name); + cos_str_set(&file, local_filename); + s = cos_put_object_from_file(options, &bucket, &object, &file, NULL, &resp_headers); + log_status(s); + + // init mulitipart + cos_str_set(&object, dest_object_name); + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, NULL, &resp_headers); + log_status(s); + + // upload part copy 1 + upload_part_copy_params1 = cos_create_upload_part_copy_params(p); + cos_str_set(&upload_part_copy_params1->copy_source, + 
"bucket-appid.cn-south.myqcloud.com/cos_test_upload_part_copy_source_object"); + cos_str_set(&upload_part_copy_params1->dest_bucket, TEST_BUCKET_NAME); + cos_str_set(&upload_part_copy_params1->dest_object, dest_object_name); + cos_str_set(&upload_part_copy_params1->upload_id, upload_id.data); + upload_part_copy_params1->part_num = part1; + upload_part_copy_params1->range_start = range_start1; + upload_part_copy_params1->range_end = range_end1; + headers = cos_table_make(p, 0); + s = cos_upload_part_copy(options, upload_part_copy_params1, headers, &resp_headers); + log_status(s); + printf("last modified:%s, etag:%s\n", upload_part_copy_params1->rsp_content->last_modify.data, + upload_part_copy_params1->rsp_content->etag.data); + + // upload part copy 2 + resp_headers = NULL; + range_end2 = get_file_size(local_filename) - 1; + upload_part_copy_params2 = cos_create_upload_part_copy_params(p); + cos_str_set(&upload_part_copy_params2->copy_source, + "bucket-appid.cn-south.myqcloud.com/cos_test_upload_part_copy_source_object"); + cos_str_set(&upload_part_copy_params2->dest_bucket, TEST_BUCKET_NAME); + cos_str_set(&upload_part_copy_params2->dest_object, dest_object_name); + cos_str_set(&upload_part_copy_params2->upload_id, upload_id.data); + upload_part_copy_params2->part_num = part2; + upload_part_copy_params2->range_start = range_start2; + upload_part_copy_params2->range_end = range_end2; + headers = cos_table_make(p, 0); + s = cos_upload_part_copy(options, upload_part_copy_params2, headers, &resp_headers); + log_status(s); + printf("last modified:%s, etag:%s\n", upload_part_copy_params1->rsp_content->last_modify.data, + upload_part_copy_params1->rsp_content->etag.data); + + // list part + list_upload_part_params = cos_create_list_upload_part_params(p); + list_upload_part_params->max_ret = 10; + cos_list_init(&complete_part_list); + + cos_str_set(&dest_bucket, TEST_BUCKET_NAME); + cos_str_set(&dest_object, dest_object_name); + s = cos_list_upload_part(options, 
&dest_bucket, &dest_object, &upload_id, list_upload_part_params, + &list_part_resp_headers); + log_status(s); + cos_list_for_each_entry(cos_list_part_content_t, part_content, &list_upload_part_params->part_list, node) { + complete_content = cos_create_complete_part_content(p); + cos_str_set(&complete_content->part_number, part_content->part_number.data); + cos_str_set(&complete_content->etag, part_content->etag.data); + cos_list_add_tail(&complete_content->node, &complete_part_list); + } + + // complete multipart + headers = cos_table_make(p, 0); + s = cos_complete_multipart_upload(options, &dest_bucket, &dest_object, &upload_id, &complete_part_list, headers, + &complete_resp_headers); + log_status(s); + + // check upload copy part content equal to local file + headers = cos_table_make(p, 0); + cos_str_set(&download_file, download_filename); + s = cos_get_object_to_file(options, &dest_bucket, &dest_object, headers, query_params, &download_file, &resp_headers); + log_status(s); + printf("local file len = %" APR_INT64_T_FMT ", download file len = %" APR_INT64_T_FMT, get_file_size(local_filename), + get_file_size(download_filename)); + remove(download_filename); + remove(local_filename); + cos_pool_destroy(p); + + printf("test part copy ok\n"); +} + +void test_cors() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + cos_list_t rule_list; + cos_list_init(&rule_list); + cos_cors_rule_content_t *rule_content = NULL; + + rule_content = cos_create_cors_rule_content(p); + cos_str_set(&rule_content->id, "testrule1"); + cos_str_set(&rule_content->allowed_origin, "http://www.qq1.com"); + cos_str_set(&rule_content->allowed_method, "GET"); + cos_str_set(&rule_content->allowed_header, "*"); + 
cos_str_set(&rule_content->expose_header, "xxx"); + rule_content->max_age_seconds = 3600; + cos_list_add_tail(&rule_content->node, &rule_list); + + rule_content = cos_create_cors_rule_content(p); + cos_str_set(&rule_content->id, "testrule2"); + cos_str_set(&rule_content->allowed_origin, "http://www.qq2.com"); + cos_str_set(&rule_content->allowed_method, "GET"); + cos_str_set(&rule_content->allowed_header, "*"); + cos_str_set(&rule_content->expose_header, "yyy"); + rule_content->max_age_seconds = 7200; + cos_list_add_tail(&rule_content->node, &rule_list); + + rule_content = cos_create_cors_rule_content(p); + cos_str_set(&rule_content->id, "testrule3"); + cos_str_set(&rule_content->allowed_origin, "http://www.qq3.com"); + cos_str_set(&rule_content->allowed_method, "GET"); + cos_str_set(&rule_content->allowed_header, "*"); + cos_str_set(&rule_content->expose_header, "zzz"); + rule_content->max_age_seconds = 60; + cos_list_add_tail(&rule_content->node, &rule_list); + + // put cors + s = cos_put_bucket_cors(options, &bucket, &rule_list, &resp_headers); + log_status(s); + + // get cors + cos_list_t rule_list_ret; + cos_list_init(&rule_list_ret); + s = cos_get_bucket_cors(options, &bucket, &rule_list_ret, &resp_headers); + log_status(s); + cos_cors_rule_content_t *content = NULL; + cos_list_for_each_entry(cos_cors_rule_content_t, content, &rule_list_ret, node) { + printf( + "cors id:%s, allowed_origin:%s, allowed_method:%s, allowed_header:%s, expose_header:%s, max_age_seconds:%d\n", + content->id.data, content->allowed_origin.data, content->allowed_method.data, content->allowed_header.data, + content->expose_header.data, content->max_age_seconds); + } + + // delete cors + cos_delete_bucket_cors(options, &bucket, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_versioning() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + 
cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + cos_versioning_content_t *versioning = NULL; + versioning = cos_create_versioning_content(p); + cos_str_set(&versioning->status, "Suspended"); + + // put bucket versioning + s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + + // get bucket versioning + cos_str_set(&versioning->status, ""); + s = cos_get_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + printf("bucket versioning status: %s\n", versioning->status.data); + + cos_pool_destroy(p); +} + +void test_replication() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_request_options_t *dst_options = NULL; + cos_string_t bucket; + cos_string_t dst_bucket; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&dst_bucket, "replicationtest"); + + dst_options = cos_request_options_create(p); + init_test_request_options(dst_options, is_cname); + cos_str_set(&dst_options->config->endpoint, "cn-east.myqcloud.com"); + + // enable bucket versioning + cos_versioning_content_t *versioning = NULL; + versioning = cos_create_versioning_content(p); + cos_str_set(&versioning->status, "Enabled"); + s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + s = cos_put_bucket_versioning(dst_options, &dst_bucket, versioning, &resp_headers); + log_status(s); + + cos_replication_params_t *replication_param = NULL; + replication_param = cos_create_replication_params(p); + cos_str_set(&replication_param->role, "qcs::cam::uin/100000616666:uin/100000616666"); + + cos_replication_rule_content_t *rule = NULL; + rule = 
cos_create_replication_rule_content(p); + cos_str_set(&rule->id, "Rule_01"); + cos_str_set(&rule->status, "Enabled"); + cos_str_set(&rule->prefix, "test1"); + cos_str_set(&rule->dst_bucket, "qcs:id/0:cos:cn-east:appid/1253686666:replicationtest"); + cos_list_add_tail(&rule->node, &replication_param->rule_list); + + rule = cos_create_replication_rule_content(p); + cos_str_set(&rule->id, "Rule_02"); + cos_str_set(&rule->status, "Disabled"); + cos_str_set(&rule->prefix, "test2"); + cos_str_set(&rule->storage_class, "Standard_IA"); + cos_str_set(&rule->dst_bucket, "qcs:id/0:cos:cn-east:appid/1253686666:replicationtest"); + cos_list_add_tail(&rule->node, &replication_param->rule_list); + + rule = cos_create_replication_rule_content(p); + cos_str_set(&rule->id, "Rule_03"); + cos_str_set(&rule->status, "Enabled"); + cos_str_set(&rule->prefix, "test3"); + cos_str_set(&rule->storage_class, "Standard_IA"); + cos_str_set(&rule->dst_bucket, "qcs:id/0:cos:cn-east:appid/1253686666:replicationtest"); + cos_list_add_tail(&rule->node, &replication_param->rule_list); + + // put bucket replication + s = cos_put_bucket_replication(options, &bucket, replication_param, &resp_headers); + log_status(s); + + // get bucket replication + cos_replication_params_t *replication_param2 = NULL; + replication_param2 = cos_create_replication_params(p); + s = cos_get_bucket_replication(options, &bucket, replication_param2, &resp_headers); + log_status(s); + printf("ReplicationConfiguration role: %s\n", replication_param2->role.data); + cos_replication_rule_content_t *content = NULL; + cos_list_for_each_entry(cos_replication_rule_content_t, content, &replication_param2->rule_list, node) { + printf("ReplicationConfiguration rule, id:%s, status:%s, prefix:%s, dst_bucket:%s, storage_class:%s\n", + content->id.data, content->status.data, content->prefix.data, content->dst_bucket.data, + content->storage_class.data); + } + + // delete bucket replication + s = cos_delete_bucket_replication(options, 
&bucket, &resp_headers); + log_status(s); + + // disable bucket versioning + cos_str_set(&versioning->status, "Suspended"); + s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + s = cos_put_bucket_versioning(dst_options, &dst_bucket, versioning, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_presigned_url() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t presigned_url; + cos_table_t *params = NULL; + cos_table_t *headers = NULL; + int sign_host = 1; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + cos_gen_presigned_url(options, &bucket, &object, 300, HTTP_GET, &presigned_url); + printf("presigned_url: %s\n", presigned_url.data); + + // 添加您自己的params和headers + params = cos_table_make(options->pool, 0); + // cos_table_add(params, "param1", "value"); + headers = cos_table_make(options->pool, 0); + // cos_table_add(headers, "header1", "value"); + + // 强烈建议sign_host为1,这样强制把host头域加入签名列表,防止越权访问问题 + cos_gen_presigned_url_safe(options, &bucket, &object, 300, HTTP_GET, headers, params, sign_host, &presigned_url); + printf("presigned_url_safe: %s\n", presigned_url.data); + + cos_pool_destroy(p); +} + +void test_head_bucket() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + + cos_str_set(&bucket, TEST_BUCKET_NAME); + options->ctl = cos_http_controller_create(options->pool, 0); + + status = cos_head_bucket(options, 
&bucket, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +void test_check_bucket_exist() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + cos_bucket_exist_status_e bucket_exist; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // 检查桶是否存在 + status = cos_check_bucket_exist(options, &bucket, &bucket_exist, &resp_headers); + if (bucket_exist == COS_BUCKET_NON_EXIST) { + printf("bucket: %.*s non exist.\n", bucket.len, bucket.data); + } else if (bucket_exist == COS_BUCKET_EXIST) { + printf("bucket: %.*s exist.\n", bucket.len, bucket.data); + } else { + printf("bucket: %.*s unknown status.\n", bucket.len, bucket.data); + log_status(status); + } + + cos_pool_destroy(pool); +} + +void test_get_service() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_get_service_params_t *list_params = NULL; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + + //创建get service参数, 默认获取全部bucket + list_params = cos_create_get_service_params(options->pool); + //若将all_region设置为0,则只根据options->config->endpoint的区域进行查询 + // list_params->all_region = 0; + + status = cos_get_service(options, list_params, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + cos_get_service_content_t *content = NULL; + char *line = NULL; + cos_list_for_each_entry(cos_get_service_content_t, content, 
&list_params->bucket_list, node) { + line = apr_psprintf(options->pool, "%.*s\t%.*s\t%.*s\n", content->bucket_name.len, content->bucket_name.data, + content->location.len, content->location.data, content->creation_date.len, + content->creation_date.data); + printf("%s", line); + } + + cos_pool_destroy(pool); +} + +void test_website() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_website_params_t *website_params = NULL; + cos_website_params_t *website_result = NULL; + cos_website_rule_content_t *website_content = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //创建website参数 + website_params = cos_create_website_params(options->pool); + cos_str_set(&website_params->index, "index.html"); + cos_str_set(&website_params->redirect_protocol, "https"); + cos_str_set(&website_params->error_document, "Error.html"); + + website_content = cos_create_website_rule_content(options->pool); + cos_str_set(&website_content->condition_errcode, "404"); + cos_str_set(&website_content->redirect_protocol, "https"); + cos_str_set(&website_content->redirect_replace_key, "404.html"); + cos_list_add_tail(&website_content->node, &website_params->rule_list); + + website_content = cos_create_website_rule_content(options->pool); + cos_str_set(&website_content->condition_prefix, "docs/"); + cos_str_set(&website_content->redirect_protocol, "https"); + cos_str_set(&website_content->redirect_replace_key_prefix, "documents/"); + cos_list_add_tail(&website_content->node, &website_params->rule_list); + + website_content = cos_create_website_rule_content(options->pool); + cos_str_set(&website_content->condition_prefix, "img/"); + cos_str_set(&website_content->redirect_protocol, "https"); + 
cos_str_set(&website_content->redirect_replace_key, "demo.jpg"); + cos_list_add_tail(&website_content->node, &website_params->rule_list); + + status = cos_put_bucket_website(options, &bucket, website_params, &resp_headers); + log_status(status); + + website_result = cos_create_website_params(options->pool); + status = cos_get_bucket_website(options, &bucket, website_result, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + cos_website_rule_content_t *content = NULL; + char *line = NULL; + line = apr_psprintf(options->pool, "%.*s\n", website_result->index.len, website_result->index.data); + printf("index: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", website_result->redirect_protocol.len, + website_result->redirect_protocol.data); + printf("redirect protocol: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", website_result->error_document.len, website_result->error_document.data); + printf("error document: %s", line); + cos_list_for_each_entry(cos_website_rule_content_t, content, &website_result->rule_list, node) { + line = apr_psprintf(options->pool, "%.*s\t%.*s\t%.*s\t%.*s\t%.*s\n", content->condition_errcode.len, + content->condition_errcode.data, content->condition_prefix.len, content->condition_prefix.data, + content->redirect_protocol.len, content->redirect_protocol.data, + content->redirect_replace_key.len, content->redirect_replace_key.data, + content->redirect_replace_key_prefix.len, content->redirect_replace_key_prefix.data); + printf("%s", line); + } + + status = cos_delete_bucket_website(options, &bucket, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +void test_domain() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_domain_params_t *domain_params = NULL; + cos_domain_params_t *domain_result = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t 
bucket; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //创建domain参数 + domain_params = cos_create_domain_params(options->pool); + cos_str_set(&domain_params->status, "ENABLED"); + cos_str_set(&domain_params->name, "www.abc.com"); + cos_str_set(&domain_params->type, "REST"); + cos_str_set(&domain_params->forced_replacement, "CNAME"); + + status = cos_put_bucket_domain(options, &bucket, domain_params, &resp_headers); + log_status(status); + + domain_result = cos_create_domain_params(options->pool); + status = cos_get_bucket_domain(options, &bucket, domain_result, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + char *line = NULL; + line = apr_psprintf(options->pool, "%.*s\n", domain_result->status.len, domain_result->status.data); + printf("status: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", domain_result->name.len, domain_result->name.data); + printf("name: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", domain_result->type.len, domain_result->type.data); + printf("type: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", domain_result->forced_replacement.len, + domain_result->forced_replacement.data); + printf("forced_replacement: %s", line); + + cos_pool_destroy(pool); +} + +void test_logging() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_logging_params_t *params = NULL; + cos_logging_params_t *result = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, 
is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //创建logging参数 + params = cos_create_logging_params(options->pool); + cos_str_set(¶ms->target_bucket, TEST_BUCKET_NAME); + cos_str_set(¶ms->target_prefix, "logging/"); + + status = cos_put_bucket_logging(options, &bucket, params, &resp_headers); + log_status(status); + + result = cos_create_logging_params(options->pool); + status = cos_get_bucket_logging(options, &bucket, result, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + char *line = NULL; + line = apr_psprintf(options->pool, "%.*s\n", result->target_bucket.len, result->target_bucket.data); + printf("target bucket: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", result->target_prefix.len, result->target_prefix.data); + printf("target prefix: %s", line); + + cos_pool_destroy(pool); +} + +void test_inventory() { + cos_pool_t *pool = NULL; + int is_cname = 0; + int inum = 3, i, len; + char buf[inum][32]; + char dest_bucket[128]; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_inventory_params_t *get_params = NULL; + cos_inventory_optional_t *optional = NULL; + cos_list_inventory_params_t *list_params = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // put bucket inventory + len = snprintf(dest_bucket, 128, "qcs::cos:%s::%s", TEST_REGION, TEST_BUCKET_NAME); + dest_bucket[len] = 0; + for (i = 0; i < inum; i++) { + cos_inventory_params_t *params = cos_create_inventory_params(pool); + cos_inventory_optional_t *optional; + len = snprintf(buf[i], 32, "id%d", i); + buf[i][len] = 0; + cos_str_set(¶ms->id, buf[i]); + 
cos_str_set(¶ms->is_enabled, "true"); + cos_str_set(¶ms->frequency, "Daily"); + cos_str_set(¶ms->filter_prefix, "myPrefix"); + cos_str_set(¶ms->included_object_versions, "All"); + cos_str_set(¶ms->destination.format, "CSV"); + cos_str_set(¶ms->destination.account_id, TEST_UIN); + cos_str_set(¶ms->destination.bucket, dest_bucket); + cos_str_set(¶ms->destination.prefix, "invent"); + params->destination.encryption = 1; + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "Size"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "LastModifiedDate"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "ETag"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "StorageClass"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "ReplicationStatus"); + cos_list_add_tail(&optional->node, ¶ms->fields); + + status = cos_put_bucket_inventory(options, &bucket, params, &resp_headers); + log_status(status); + } + + // get inventory + get_params = cos_create_inventory_params(pool); + cos_str_set(&get_params->id, buf[inum / 2]); + status = cos_get_bucket_inventory(options, &bucket, get_params, &resp_headers); + log_status(status); + + printf("id: %s\nis_enabled: %s\nfrequency: %s\nfilter_prefix: %s\nincluded_object_versions: %s\n", + get_params->id.data, get_params->is_enabled.data, get_params->frequency.data, get_params->filter_prefix.data, + get_params->included_object_versions.data); + printf("destination:\n"); + printf("\tencryption: %d\n", get_params->destination.encryption); + printf("\tformat: %s\n", get_params->destination.format.data); + printf("\taccount_id: %s\n", get_params->destination.account_id.data); + 
printf("\tbucket: %s\n", get_params->destination.bucket.data); + printf("\tprefix: %s\n", get_params->destination.prefix.data); + cos_list_for_each_entry(cos_inventory_optional_t, optional, &get_params->fields, node) { + printf("field: %s\n", optional->field.data); + } + + // list inventory + list_params = cos_create_list_inventory_params(pool); + status = cos_list_bucket_inventory(options, &bucket, list_params, &resp_headers); + log_status(status); + + get_params = NULL; + cos_list_for_each_entry(cos_inventory_params_t, get_params, &list_params->inventorys, node) { + printf("id: %s\nis_enabled: %s\nfrequency: %s\nfilter_prefix: %s\nincluded_object_versions: %s\n", + get_params->id.data, get_params->is_enabled.data, get_params->frequency.data, get_params->filter_prefix.data, + get_params->included_object_versions.data); + printf("destination:\n"); + printf("\tencryption: %d\n", get_params->destination.encryption); + printf("\tformat: %s\n", get_params->destination.format.data); + printf("\taccount_id: %s\n", get_params->destination.account_id.data); + printf("\tbucket: %s\n", get_params->destination.bucket.data); + printf("\tprefix: %s\n", get_params->destination.prefix.data); + cos_list_for_each_entry(cos_inventory_optional_t, optional, &get_params->fields, node) { + printf("field: %s\n", optional->field.data); + } + } + + // delete inventory + for (i = 0; i < inum; i++) { + cos_string_t id; + cos_str_set(&id, buf[i]); + status = cos_delete_bucket_inventory(options, &bucket, &id, &resp_headers); + log_status(status); + } + + cos_pool_destroy(pool); +} + +void test_bucket_tagging() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_tagging_params_t *params = NULL; + cos_tagging_params_t *result = NULL; + cos_tagging_tag_t *tag = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = 
cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // put tagging + params = cos_create_tagging_params(pool); + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "age"); + cos_str_set(&tag->value, "18"); + cos_list_add_tail(&tag->node, ¶ms->node); + + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "name"); + cos_str_set(&tag->value, "xiaoming"); + cos_list_add_tail(&tag->node, ¶ms->node); + + status = cos_put_bucket_tagging(options, &bucket, params, &resp_headers); + log_status(status); + + // get tagging + result = cos_create_tagging_params(pool); + status = cos_get_bucket_tagging(options, &bucket, result, &resp_headers); + log_status(status); + + tag = NULL; + cos_list_for_each_entry(cos_tagging_tag_t, tag, &result->node, node) { + printf("taging key: %s\n", tag->key.data); + printf("taging value: %s\n", tag->value.data); + } + + // delete tagging + status = cos_delete_bucket_tagging(options, &bucket, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +void test_object_tagging() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t version_id = cos_string(""); + cos_tagging_params_t *params = NULL; + cos_tagging_params_t *result = NULL; + cos_tagging_tag_t *tag = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + // put object tagging + params = cos_create_tagging_params(pool); + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "age"); + cos_str_set(&tag->value, 
"18"); + cos_list_add_tail(&tag->node, ¶ms->node); + + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "name"); + cos_str_set(&tag->value, "xiaoming"); + cos_list_add_tail(&tag->node, ¶ms->node); + + status = cos_put_object_tagging(options, &bucket, &object, &version_id, NULL, params, &resp_headers); + log_status(status); + + // get object tagging + result = cos_create_tagging_params(pool); + status = cos_get_object_tagging(options, &bucket, &object, &version_id, NULL, result, &resp_headers); + log_status(status); + + tag = NULL; + cos_list_for_each_entry(cos_tagging_tag_t, tag, &result->node, node) { + printf("taging key: %s\n", tag->key.data); + printf("taging value: %s\n", tag->value.data); + } + + // delete tagging + status = cos_delete_object_tagging(options, &bucket, &object, &version_id, NULL, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +static void log_get_referer(cos_referer_params_t *result) { + int index = 0; + cos_referer_domain_t *domain; + + cos_warn_log("status: %s", result->status.data); + cos_warn_log("referer_type: %s", result->referer_type.data); + cos_warn_log("empty_refer_config: %s", result->empty_refer_config.data); + + cos_list_for_each_entry(cos_referer_domain_t, domain, &result->domain_list, node) { + cos_warn_log("domain index:%d", ++index); + cos_warn_log("domain: %s", domain->domain.data); + } +} + +void test_referer() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_referer_params_t *params = NULL; + cos_referer_domain_t *domain = NULL; + cos_referer_params_t *result = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/32492 + params = 
cos_create_referer_params(pool); + cos_str_set(¶ms->status, "Enabled"); + cos_str_set(¶ms->referer_type, "White-List"); + cos_str_set(¶ms->empty_refer_config, "Allow"); + domain = cos_create_referer_domain(pool); + cos_str_set(&domain->domain, "www.qq.com"); + cos_list_add_tail(&domain->node, ¶ms->domain_list); + domain = cos_create_referer_domain(pool); + cos_str_set(&domain->domain, "*.tencent.com"); + cos_list_add_tail(&domain->node, ¶ms->domain_list); + + // put referer + status = cos_put_bucket_referer(options, &bucket, params, &resp_headers); + log_status(status); + + // get referer + result = cos_create_referer_params(pool); + status = cos_get_bucket_referer(options, &bucket, result, &resp_headers); + log_status(status); + if (status->code == 200) { + log_get_referer(result); + } + + cos_pool_destroy(pool); +} + +void test_intelligenttiering() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_intelligenttiering_params_t *params = NULL; + cos_intelligenttiering_params_t *result = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // put intelligenttiering + params = cos_create_intelligenttiering_params(pool); + cos_str_set(¶ms->status, "Enabled"); + params->days = 30; + + status = cos_put_bucket_intelligenttiering(options, &bucket, params, &resp_headers); + log_status(status); + + // get intelligenttiering + result = cos_create_intelligenttiering_params(pool); + status = cos_get_bucket_intelligenttiering(options, &bucket, result, &resp_headers); + log_status(status); + + printf("status: %s\n", result->status.data); + printf("days: %d\n", result->days); + 
cos_pool_destroy(pool);
+}
+
+// Delete every object under a prefix ("directory") by paging through
+// cos_list_object and batch-deleting each page with cos_delete_objects.
+void test_delete_directory() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_table_t *resp_headers;
+  int is_truncated = 1;
+  cos_string_t marker;
+  cos_list_t deleted_object_list;
+  int is_quiet = COS_TRUE;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // BUGFIX: the result list head must be initialized before it is handed to
+  // cos_delete_objects, which links deleted-object entries onto it; it was
+  // previously used uninitialized (cf. cos_list_init in test_list_all_objects).
+  cos_list_init(&deleted_object_list);
+
+  // list object (get bucket)
+  cos_list_object_params_t *list_params = NULL;
+  list_params = cos_create_list_object_params(p);
+  cos_str_set(&list_params->prefix, "folder/");
+  cos_str_set(&marker, "");
+  while (is_truncated) {
+    list_params->marker = marker;
+    s = cos_list_object(options, &bucket, list_params, &resp_headers);
+    if (!cos_status_is_ok(s)) {
+      printf("list object failed, req_id:%s\n", s->req_id);
+      break;
+    }
+
+    // Batch-delete the page of keys just listed; deleted entries (if not
+    // quiet) are collected into deleted_object_list.
+    s = cos_delete_objects(options, &bucket, &list_params->object_list, is_quiet, &resp_headers, &deleted_object_list);
+    log_status(s);
+    if (!cos_status_is_ok(s)) {
+      printf("delete objects failed, req_id:%s\n", s->req_id);
+    }
+
+    is_truncated = list_params->truncated;
+    marker = list_params->next_marker;
+  }
+  cos_pool_destroy(p);
+}
+
+void test_list_directory() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_table_t *resp_headers;
+  int is_truncated = 1;
+  cos_string_t marker;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // list object (get bucket)
+  cos_list_object_params_t *list_params = NULL;
+  list_params = cos_create_list_object_params(p);
+  // prefix: only object keys starting with this prefix are listed
+  cos_str_set(&list_params->prefix, "folder/");
+  // delimiter: "/" lists only the current directory level; empty lists all objects
+  cos_str_set(&list_params->delimiter, "/");
+  // 
设置最大遍历出多少个对象, 一次listobject最大支持1000 + list_params->max_ret = 1000; + cos_str_set(&marker, ""); + while (is_truncated) { + list_params->marker = marker; + s = cos_list_object(options, &bucket, list_params, &resp_headers); + if (!cos_status_is_ok(s)) { + printf("list object failed, req_id:%s\n", s->req_id); + break; + } + // list_params->object_list 返回列出的object对象。 + cos_list_object_content_t *content = NULL; + cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) { + printf("object: %s\n", content->key.data); + } + // list_params->common_prefix_list 表示被delimiter截断的路径, 如delimter设置为/, common prefix则表示所有子目录的路径 + cos_list_object_common_prefix_t *common_prefix = NULL; + cos_list_for_each_entry(cos_list_object_common_prefix_t, common_prefix, &list_params->common_prefix_list, node) { + printf("common prefix: %s\n", common_prefix->prefix.data); + } + + is_truncated = list_params->truncated; + marker = list_params->next_marker; + } + cos_pool_destroy(p); +} + +void test_list_all_objects() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers; + int is_truncated = 1; + cos_string_t marker; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // list object (get bucket) + cos_list_object_params_t *list_params = NULL; + list_params = cos_create_list_object_params(p); + // 设置最大遍历出多少个对象, 一次listobject最大支持1000 + list_params->max_ret = 1000; + cos_str_set(&marker, ""); + while (is_truncated) { + list_params->marker = marker; + cos_list_init(&list_params->object_list); + s = cos_list_object(options, &bucket, list_params, &resp_headers); + if (!cos_status_is_ok(s)) { + printf("list object failed, req_id:%s\n", s->req_id); + break; + } + // list_params->object_list 返回列出的object对象。 + cos_list_object_content_t *content = NULL; + 
cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) { + printf("object: %s\n", content->key.data); + } + + is_truncated = list_params->truncated; + marker = list_params->next_marker; + } + cos_pool_destroy(p); +} + +void test_download_directory() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t file_name; + cos_string_t suffix = cos_string("/"); + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_table_t *params = NULL; + int is_truncated = 1; + cos_string_t marker; + apr_status_t status; + + //初始化请求选项 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // list object (get bucket) + cos_list_object_params_t *list_params = NULL; + list_params = cos_create_list_object_params(p); + cos_str_set(&list_params->prefix, "folder/"); //替换为您自己的目录名称 + cos_str_set(&marker, ""); + while (is_truncated) { + list_params->marker = marker; + s = cos_list_object(options, &bucket, list_params, &resp_headers); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("list object failed, req_id:%s\n", s->req_id); + break; + } + cos_list_object_content_t *content = NULL; + cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) { + cos_str_set(&file_name, content->key.data); + if (cos_ends_with(&content->key, &suffix)) { + //如果是目录需要先创建, 0x0755权限可以自己按需修改,参考apr_file_info.h中定义 + status = apr_dir_make(content->key.data, 0x0755, options->pool); + if (status != APR_SUCCESS && !APR_STATUS_IS_EEXIST(status)) { + printf("mkdir: %s failed, status: %d\n", content->key.data, status); + } + } else { + //下载对象到本地目录,这里默认下载在程序运行的当前目录 + s = cos_get_object_to_file(options, &bucket, &content->key, headers, params, &file_name, &resp_headers); + if (!cos_status_is_ok(s)) { + printf("get object[%s] failed, 
req_id:%s\n", content->key.data, s->req_id); + } + } + } + is_truncated = list_params->truncated; + marker = list_params->next_marker; + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_move() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t src_object; + cos_string_t src_endpoint; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //设置对象复制 + cos_str_set(&object, TEST_OBJECT_NAME1); + cos_str_set(&src_endpoint, TEST_COS_ENDPOINT); + cos_str_set(&src_object, TEST_OBJECT_NAME2); + + cos_copy_object_params_t *params = NULL; + params = cos_create_copy_object_params(p); + s = cos_copy_object(options, &bucket, &src_object, &src_endpoint, &bucket, &object, NULL, params, &resp_headers); + log_status(s); + if (cos_status_is_ok(s)) { + s = cos_delete_object(options, &bucket, &src_object, &resp_headers); + log_status(s); + printf("move object succeeded\n"); + } else { + printf("move object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +// 基础图片处理 +void test_ci_base_image_process() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers; + cos_table_t *params = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = 
cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + params = cos_table_make(p, 1); + apr_table_addn(params, "imageMogr2/thumbnail/!50p", ""); + cos_str_set(&file, "test.jpg"); + cos_str_set(&object, "test.jpg"); + s = cos_get_object_to_file(options, &bucket, &object, NULL, params, &file, &resp_headers); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id); + } + cos_pool_destroy(p); +} + +// 持久化处理 +void test_ci_image_process() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + ci_operation_result_t *results = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.jpg"); + + // 云上数据处理 + headers = cos_table_make(p, 1); + apr_table_addn(headers, "pic-operations", + "{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"imageView2/format/png\"}]}"); + s = ci_image_process(options, &bucket, &object, headers, &resp_headers, &results); + log_status(s); + printf("origin key: %s\n", results->origin.key.data); + printf("process key: %s\n", results->object.key.data); + + // 上传时处理 + headers = cos_table_make(p, 1); + apr_table_addn(headers, "pic-operations", + "{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"imageView2/format/png\"}]}"); + cos_str_set(&file, "test.jpg"); + cos_str_set(&object, "test.jpg"); + s = ci_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers, &results); + log_status(s); + printf("origin key: %s\n", results->origin.key.data); + printf("process key: %s\n", results->object.key.data); + + cos_pool_destroy(p); +} + +// 二维码识别 +void test_ci_image_qrcode() { + cos_pool_t *p = NULL; 
+ int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + ci_operation_result_t *results = NULL; + ci_qrcode_info_t *content = NULL; + ci_qrcode_result_t *result2 = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.jpg"); + + headers = cos_table_make(p, 1); + apr_table_addn(headers, "pic-operations", + "{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"QRcode/cover/1\"}]}"); + // 上传时识别 + cos_str_set(&file, "test.jpg"); + cos_str_set(&object, "test.jpg"); + s = ci_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers, &results); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("put object failed\n"); + } + printf("CodeStatus: %d\n", results->object.code_status); + cos_list_for_each_entry(ci_qrcode_info_t, content, &results->object.qrcode_info, node) { + printf("CodeUrl: %s\n", content->code_url.data); + printf("Point: %s\n", content->point[0].data); + printf("Point: %s\n", content->point[1].data); + printf("Point: %s\n", content->point[2].data); + printf("Point: %s\n", content->point[3].data); + } + + // 下载时识别 + s = ci_get_qrcode(options, &bucket, &object, 1, NULL, NULL, &resp_headers, &result2); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("get object failed\n"); + } + printf("CodeStatus: %d\n", result2->code_status); + cos_list_for_each_entry(ci_qrcode_info_t, content, &result2->qrcode_info, node) { + printf("CodeUrl: %s\n", content->code_url.data); + printf("Point: %s\n", content->point[0].data); + printf("Point: %s\n", content->point[1].data); + printf("Point: %s\n", content->point[2].data); + printf("Point: %s\n", content->point[3].data); + } + printf("ImageResult: %s\n", result2->result_image.data); + 
+
+  // destroy the memory pool
+  cos_pool_destroy(p);
+}
+
+// Image compression: download an object twice through the CI data-processing
+// pipeline, transcoding it to TPG and then to HEIF on the fly.
+void test_ci_image_compression() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_string_t file;
+  cos_table_t *resp_headers;
+  cos_table_t *params = NULL;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  options->config = cos_config_create(options->pool);
+  cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT);
+  cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID);
+  cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET);
+  cos_str_set(&options->config->appid, TEST_APPID);
+  options->config->is_cname = is_cname;
+  options->ctl = cos_http_controller_create(options->pool, 0);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // convert to TPG while downloading
+  params = cos_table_make(p, 1);
+  apr_table_addn(params, "imageMogr2/format/tpg", "");
+  cos_str_set(&object, "test.jpg");
+  cos_str_set(&file, "test.tpg");
+  s = cos_get_object_to_file(options, &bucket, &object, NULL, params, &file, &resp_headers);
+  log_status(s);
+  if (!cos_status_is_ok(s)) {
+    printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id);
+  }
+
+  // convert to HEIF while downloading
+  params = cos_table_make(p, 1);
+  apr_table_addn(params, "imageMogr2/format/heif", "");
+  cos_str_set(&file, "test.heif");
+  s = cos_get_object_to_file(options, &bucket, &object, NULL, params, &file, &resp_headers);
+  log_status(s);
+  if (!cos_status_is_ok(s)) {
+    printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id);
+  }
+
+  // BUGFIX: the pool created by cos_pool_create above was never released,
+  // leaking it on every call (every other test_* function destroys its pool).
+  cos_pool_destroy(p);
+}
+
+// Dump the identifying fields of a submitted video-auditing job.
+static void log_video_auditing_result(ci_video_auditing_job_result_t *result) {
+  cos_warn_log("jobid: %s", result->jobs_detail.job_id.data);
+  cos_warn_log("state: %s", result->jobs_detail.state.data);
+  cos_warn_log("creation_time: %s", result->jobs_detail.creation_time.data);
+}
+
+// Dump a full auditing-job query result, section by section.
+static void log_get_auditing_result(ci_auditing_job_result_t *result) {
+  int index = 0;
+  ci_auditing_snapshot_result_t *snapshot_info;
+  
ci_auditing_audio_section_result_t *audio_section_info; + + cos_warn_log("nonexist_job_ids: %s", result->nonexist_job_ids.data); + cos_warn_log("code: %s", result->jobs_detail.code.data); + cos_warn_log("message: %s", result->jobs_detail.message.data); + cos_warn_log("state: %s", result->jobs_detail.state.data); + cos_warn_log("creation_time: %s", result->jobs_detail.creation_time.data); + cos_warn_log("object: %s", result->jobs_detail.object.data); + cos_warn_log("snapshot_count: %s", result->jobs_detail.snapshot_count.data); + cos_warn_log("result: %d", result->jobs_detail.result); + + cos_warn_log("porn_info.hit_flag: %d", result->jobs_detail.porn_info.hit_flag); + cos_warn_log("porn_info.count: %d", result->jobs_detail.porn_info.count); + cos_warn_log("terrorism_info.hit_flag: %d", result->jobs_detail.terrorism_info.hit_flag); + cos_warn_log("terrorism_info.count: %d", result->jobs_detail.terrorism_info.count); + cos_warn_log("politics_info.hit_flag: %d", result->jobs_detail.politics_info.hit_flag); + cos_warn_log("politics_info.count: %d", result->jobs_detail.politics_info.count); + cos_warn_log("ads_info.hit_flag: %d", result->jobs_detail.ads_info.hit_flag); + cos_warn_log("ads_info.count: %d", result->jobs_detail.ads_info.count); + + cos_list_for_each_entry(ci_auditing_snapshot_result_t, snapshot_info, &result->jobs_detail.snapshot_info_list, node) { + cos_warn_log("snapshot index:%d", ++index); + cos_warn_log("snapshot_info->url: %s", snapshot_info->url.data); + cos_warn_log("snapshot_info->snapshot_time: %d", snapshot_info->snapshot_time); + cos_warn_log("snapshot_info->text: %s", snapshot_info->text.data); + + cos_warn_log("snapshot_info->porn_info.hit_flag: %d", snapshot_info->porn_info.hit_flag); + cos_warn_log("snapshot_info->porn_info.score: %d", snapshot_info->porn_info.score); + cos_warn_log("snapshot_info->porn_info.label: %s", snapshot_info->porn_info.label.data); + cos_warn_log("snapshot_info->porn_info.sub_lable: %s", 
snapshot_info->porn_info.sub_lable.data); + cos_warn_log("snapshot_info->terrorism_info.hit_flag: %d", snapshot_info->terrorism_info.hit_flag); + cos_warn_log("snapshot_info->terrorism_info.score: %d", snapshot_info->terrorism_info.score); + cos_warn_log("snapshot_info->terrorism_info.label: %s", snapshot_info->terrorism_info.label.data); + cos_warn_log("snapshot_info->terrorism_info.sub_lable: %s", snapshot_info->terrorism_info.sub_lable.data); + cos_warn_log("snapshot_info->politics_info.hit_flag: %d", snapshot_info->politics_info.hit_flag); + cos_warn_log("snapshot_info->politics_info.score: %d", snapshot_info->politics_info.score); + cos_warn_log("snapshot_info->politics_info.label: %s", snapshot_info->politics_info.label.data); + cos_warn_log("snapshot_info->politics_info.sub_lable: %s", snapshot_info->politics_info.sub_lable.data); + cos_warn_log("snapshot_info->ads_info.hit_flag: %d", snapshot_info->ads_info.hit_flag); + cos_warn_log("snapshot_info->ads_info.score: %d", snapshot_info->ads_info.score); + cos_warn_log("snapshot_info->ads_info.label: %s", snapshot_info->ads_info.label.data); + cos_warn_log("snapshot_info->ads_info.sub_lable: %s", snapshot_info->ads_info.sub_lable.data); + } + + index = 0; + cos_list_for_each_entry(ci_auditing_audio_section_result_t, audio_section_info, + &result->jobs_detail.audio_section_info_list, node) { + cos_warn_log("audio_section index:%d", ++index); + cos_warn_log("audio_section_info->url: %s", audio_section_info->url.data); + cos_warn_log("audio_section_info->text: %s", audio_section_info->text.data); + cos_warn_log("audio_section_info->offset_time: %d", audio_section_info->offset_time); + cos_warn_log("audio_section_info->duration: %d", audio_section_info->duration); + + cos_warn_log("audio_section_info->porn_info.hit_flag: %d", audio_section_info->porn_info.hit_flag); + cos_warn_log("audio_section_info->porn_info.score: %d", audio_section_info->porn_info.score); + 
cos_warn_log("audio_section_info->porn_info.key_words: %s", audio_section_info->porn_info.key_words.data); + cos_warn_log("audio_section_info->terrorism_info.hit_flag: %d", audio_section_info->terrorism_info.hit_flag); + cos_warn_log("audio_section_info->terrorism_info.score: %d", audio_section_info->terrorism_info.score); + cos_warn_log("audio_section_info->terrorism_info.key_words: %s", audio_section_info->terrorism_info.key_words.data); + cos_warn_log("audio_section_info->politics_info.hit_flag: %d", audio_section_info->politics_info.hit_flag); + cos_warn_log("audio_section_info->politics_info.score: %d", audio_section_info->politics_info.score); + cos_warn_log("audio_section_info->politics_info.key_words: %s", audio_section_info->politics_info.key_words.data); + cos_warn_log("audio_section_info->ads_info.hit_flag: %d", audio_section_info->ads_info.hit_flag); + cos_warn_log("audio_section_info->ads_info.score: %d", audio_section_info->ads_info.score); + cos_warn_log("audio_section_info->ads_info.key_words: %s", audio_section_info->ads_info.key_words.data); + } +} + +static void log_media_buckets_result(ci_media_buckets_result_t *result) { + int index = 0; + ci_media_bucket_list_t *media_bucket; + + cos_warn_log("total_count: %d", result->total_count); + cos_warn_log("page_number: %d", result->page_number); + cos_warn_log("page_size: %d", result->page_size); + + cos_list_for_each_entry(ci_media_bucket_list_t, media_bucket, &result->media_bucket_list, node) { + cos_warn_log("media_bucket index:%d", ++index); + cos_warn_log("media_bucket->bucket_id: %s", media_bucket->bucket_id.data); + cos_warn_log("media_bucket->name: %s", media_bucket->name.data); + cos_warn_log("media_bucket->region: %s", media_bucket->region.data); + cos_warn_log("media_bucket->create_time: %s", media_bucket->create_time.data); + } +} + +static void log_media_info_result(ci_media_info_result_t *result) { + // format + cos_warn_log("format.num_stream: %d", result->format.num_stream); + 
cos_warn_log("format.num_program: %d", result->format.num_program); + cos_warn_log("format.format_name: %s", result->format.format_name.data); + cos_warn_log("format.format_long_name: %s", result->format.format_long_name.data); + cos_warn_log("format.start_time: %f", result->format.start_time); + cos_warn_log("format.duration: %f", result->format.duration); + cos_warn_log("format.bit_rate: %d", result->format.bit_rate); + cos_warn_log("format.size: %d", result->format.size); + + // stream.video + cos_warn_log("stream.video.index: %d", result->stream.video.index); + cos_warn_log("stream.video.codec_name: %s", result->stream.video.codec_name.data); + cos_warn_log("stream.video.codec_long_name: %s", result->stream.video.codec_long_name.data); + cos_warn_log("stream.video.codec_time_base: %s", result->stream.video.codec_time_base.data); + cos_warn_log("stream.video.codec_tag_string: %s", result->stream.video.codec_tag_string.data); + cos_warn_log("stream.video.codec_tag: %s", result->stream.video.codec_tag.data); + cos_warn_log("stream.video.profile: %s", result->stream.video.profile.data); + cos_warn_log("stream.video.height: %d", result->stream.video.height); + cos_warn_log("stream.video.width: %d", result->stream.video.width); + cos_warn_log("stream.video.has_b_frame: %d", result->stream.video.has_b_frame); + cos_warn_log("stream.video.ref_frames: %d", result->stream.video.ref_frames); + cos_warn_log("stream.video.sar: %s", result->stream.video.sar.data); + cos_warn_log("stream.video.dar: %s", result->stream.video.dar.data); + cos_warn_log("stream.video.pix_format: %s", result->stream.video.pix_format.data); + cos_warn_log("stream.video.field_order: %s", result->stream.video.field_order.data); + cos_warn_log("stream.video.level: %d", result->stream.video.level); + cos_warn_log("stream.video.fps: %d", result->stream.video.fps); + cos_warn_log("stream.video.avg_fps: %s", result->stream.video.avg_fps.data); + cos_warn_log("stream.video.timebase: %s", 
result->stream.video.timebase.data); + cos_warn_log("stream.video.start_time: %f", result->stream.video.start_time); + cos_warn_log("stream.video.duration: %f", result->stream.video.duration); + cos_warn_log("stream.video.bit_rate: %f", result->stream.video.bit_rate); + cos_warn_log("stream.video.num_frames: %d", result->stream.video.num_frames); + cos_warn_log("stream.video.language: %s", result->stream.video.language.data); + + // stream.audio + cos_warn_log("stream.audio.index: %d", result->stream.audio.index); + cos_warn_log("stream.audio.codec_name: %s", result->stream.audio.codec_name.data); + cos_warn_log("stream.audio.codec_long_name: %s", result->stream.audio.codec_long_name.data); + cos_warn_log("stream.audio.codec_time_base: %s", result->stream.audio.codec_time_base.data); + cos_warn_log("stream.audio.codec_tag_string: %s", result->stream.audio.codec_tag_string.data); + cos_warn_log("stream.audio.codec_tag: %s", result->stream.audio.codec_tag.data); + cos_warn_log("stream.audio.sample_fmt: %s", result->stream.audio.sample_fmt.data); + cos_warn_log("stream.audio.sample_rate: %d", result->stream.audio.sample_rate); + cos_warn_log("stream.audio.channel: %d", result->stream.audio.channel); + cos_warn_log("stream.audio.channel_layout: %s", result->stream.audio.channel_layout.data); + cos_warn_log("stream.audio.timebase: %s", result->stream.audio.timebase.data); + cos_warn_log("stream.audio.start_time: %f", result->stream.audio.start_time); + cos_warn_log("stream.audio.duration: %f", result->stream.audio.duration); + cos_warn_log("stream.audio.bit_rate: %f", result->stream.audio.bit_rate); + cos_warn_log("stream.audio.language: %s", result->stream.audio.language.data); + + // stream.subtitle + cos_warn_log("stream.subtitle.index: %d", result->stream.subtitle.index); + cos_warn_log("stream.subtitle.language: %s", result->stream.subtitle.language.data); +} + +void test_ci_video_auditing() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + 
cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers; + ci_video_auditing_job_options_t *job_options; + ci_video_auditing_job_result_t *job_result; + ci_auditing_job_result_t *auditing_result; + + // 基本配置 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_CI_ENDPOINT); // https://ci..myqcloud.com + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/47316 + job_options = ci_video_auditing_job_options_create(p); + cos_str_set(&job_options->input_object, "test.mp4"); + cos_str_set(&job_options->job_conf.detect_type, "Porn,Terrorism,Politics,Ads"); + cos_str_set(&job_options->job_conf.callback_version, "Detail"); + job_options->job_conf.detect_content = 1; + cos_str_set(&job_options->job_conf.snapshot.mode, "Interval"); + job_options->job_conf.snapshot.time_interval = 1.5; + job_options->job_conf.snapshot.count = 10; + + // 提交一个视频审核任务 + s = ci_create_video_auditing_job(options, &bucket, job_options, NULL, &resp_headers, &job_result); + log_status(s); + if (s->code == 200) { + log_video_auditing_result(job_result); + } + + // 等待视频审核任务完成,此处可修改您的等待时间 + sleep(300); + + // 获取审核任务结果 + s = ci_get_auditing_job(options, &bucket, &job_result->jobs_detail.job_id, NULL, &resp_headers, &auditing_result); + log_status(s); + if (s->code == 200) { + log_get_auditing_result(auditing_result); + } + + // 销毁内存池 + cos_pool_destroy(p); +} + +void test_ci_media_process_media_bucket() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t 
*options = NULL; + cos_table_t *resp_headers; + ci_media_buckets_request_t *media_buckets_request; + ci_media_buckets_result_t *media_buckets_result; + + // 基本配置 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_CI_ENDPOINT); // https://ci..myqcloud.com + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/48988 + media_buckets_request = ci_media_buckets_request_create(p); + cos_str_set(&media_buckets_request->regions, ""); + cos_str_set(&media_buckets_request->bucket_names, ""); + cos_str_set(&media_buckets_request->bucket_name, ""); + cos_str_set(&media_buckets_request->page_number, "1"); + cos_str_set(&media_buckets_request->page_size, "10"); + s = ci_describe_media_buckets(options, media_buckets_request, NULL, &resp_headers, &media_buckets_result); + log_status(s); + if (s->code == 200) { + log_media_buckets_result(media_buckets_result); + } + + // 销毁内存池 + cos_pool_destroy(p); +} + +void test_ci_media_process_snapshot() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers; + cos_list_t download_buffer; + cos_string_t object; + ci_get_snapshot_request_t *snapshot_request; + cos_buf_t *content = NULL; + cos_string_t pic_file = cos_string("snapshot.jpg"); + + // 基本配置 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + 
cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.mp4"); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/55671 + snapshot_request = ci_snapshot_request_create(p); + snapshot_request->time = 7.5; + snapshot_request->width = 0; + snapshot_request->height = 0; + cos_str_set(&snapshot_request->format, "jpg"); + cos_str_set(&snapshot_request->rotate, "auto"); + cos_str_set(&snapshot_request->mode, "exactframe"); + cos_list_init(&download_buffer); + + s = ci_get_snapshot_to_buffer(options, &bucket, &object, snapshot_request, NULL, &download_buffer, &resp_headers); + log_status(s); + + int64_t len = 0; + int64_t size = 0; + int64_t pos = 0; + cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { len += cos_buf_size(content); } + char *buf = cos_pcalloc(p, (apr_size_t)(len + 1)); + buf[len] = '\0'; + cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { + size = cos_buf_size(content); + memcpy(buf + pos, content->pos, (size_t)size); + pos += size; + } + cos_warn_log("Download len:%ld data=%s", len, buf); + + s = ci_get_snapshot_to_file(options, &bucket, &object, snapshot_request, NULL, &pic_file, &resp_headers); + log_status(s); + + // 销毁内存池 + cos_pool_destroy(p); +} + +void test_ci_media_process_media_info() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers; + ci_media_info_result_t *media_info; + cos_string_t object; + + // 基本配置 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&options->config->access_key_id, 
TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.mp4"); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/55672 + s = ci_get_media_info(options, &bucket, &object, NULL, &resp_headers, &media_info); + log_status(s); + if (s->code == 200) { + log_media_info_result(media_info); + } + + // 销毁内存池 + cos_pool_destroy(p); +} + +int main(int argc, char *argv[]) { + // 通过环境变量获取 SECRETID 和 SECRETKEY + // TEST_ACCESS_KEY_ID = getenv("COS_SECRETID"); + // TEST_ACCESS_KEY_SECRET = getenv("COS_SECRETKEY"); + + if (cos_http_io_initialize(NULL, 0) != COSE_OK) { + exit(1); + } + + // set log level, default COS_LOG_WARN + cos_log_set_level(COS_LOG_WARN); + + // set log output, default stderr + cos_log_set_output(NULL); + + // test_intelligenttiering(); + // test_bucket_tagging(); + // test_object_tagging(); + // test_referer(); + // test_logging(); + // test_inventory(); + // test_put_object_from_file_with_sse(); + // test_get_object_to_file_with_sse(); + // test_head_bucket(); + // test_check_bucket_exist(); + // test_get_service(); + // test_website(); + // test_domain(); + // test_delete_objects(); + // test_delete_objects_by_prefix(); + // test_bucket(); + // test_bucket_lifecycle(); + // test_object_restore(); + test_put_object_from_file(); + // test_sign(); + // test_object(); + // test_put_object_with_limit(); + // test_get_object_with_limit(); + // test_head_object(); + // test_gen_object_url(); + // test_list_objects(); + // test_list_directory(); + // test_list_all_objects(); + // test_create_dir(); + // test_append_object(); + // test_check_object_exist(); + // multipart_upload_file_from_file(); + // multipart_upload_file_from_buffer(); + // abort_multipart_upload(); + // 
list_multipart(); + + // pthread_t tid[20]; + // test_resumable_upload_with_multi_threads(); + // test_resumable(); + // test_bucket(); + // test_acl(); + // test_copy(); + // test_modify_storage_class(); + // test_cors(); + // test_versioning(); + // test_replication(); + // test_part_copy(); + // test_copy_with_part_copy(); + // test_move(); + // test_delete_directory(); + // test_download_directory(); + // test_presigned_url(); + // test_ci_base_image_process(); + // test_ci_image_process(); + // test_ci_image_qrcode(); + // test_ci_image_compression(); + // test_ci_video_auditing(); + // test_ci_media_process_media_bucket(); + // test_ci_media_process_snapshot(); + // test_ci_media_process_media_info(); + + // cos_http_io_deinitialize last + cos_http_io_deinitialize(); + + return 0; +} diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 0546ed7f47..1f6d0800a5 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -14,8 +14,8 @@ */ #define _DEFAULT_SOURCE -#include "os.h" #include "tglobal.h" +#include "os.h" #include "tconfig.h" #include "tgrant.h" #include "tlog.h" @@ -63,7 +63,7 @@ int32_t tsNumOfQnodeFetchThreads = 1; int32_t tsNumOfSnodeStreamThreads = 4; int32_t tsNumOfSnodeWriteThreads = 1; int32_t tsMaxStreamBackendCache = 128; // M -int32_t tsPQSortMemThreshold = 16; // M +int32_t tsPQSortMemThreshold = 16; // M // sync raft int32_t tsElectInterval = 25 * 1000; @@ -121,8 +121,8 @@ int32_t tsQueryPolicy = 1; int32_t tsQueryRspPolicy = 0; int64_t tsQueryMaxConcurrentTables = 200; // unit is TSDB_TABLE_NUM_UNIT bool tsEnableQueryHb = true; -bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true -bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true +bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true +bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true int32_t 
tsQuerySmaOptimize = 0; int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from which level to query rsma data. bool tsQueryPlannerTrace = false; @@ -235,6 +235,13 @@ int64_t tsCheckpointInterval = 3 * 60 * 60 * 1000; bool tsFilterScalarMode = false; int32_t tsKeepTimeOffset = 0; // latency of data migration +char tsS3Endpoint[TSDB_FQDN_LEN] = ""; +char tsS3AcessKeyId[TSDB_FQDN_LEN] = ""; +char tsS3AcessKeySecret[TSDB_FQDN_LEN] = ""; +char tsS3BucketName[TSDB_FQDN_LEN] = ""; +char tsS3AppId[TSDB_FQDN_LEN] = ""; +int8_t tsS3Enabled = false; + #ifndef _STORAGE int32_t taosSetTfsCfg(SConfig *pCfg) { SConfigItem *pItem = cfgGetItem(pCfg, "dataDir"); @@ -256,7 +263,9 @@ int32_t taosSetTfsCfg(SConfig *pCfg) { int32_t taosSetTfsCfg(SConfig *pCfg); #endif -struct SConfig *taosGetCfg() { return tsCfg; } +struct SConfig *taosGetCfg() { + return tsCfg; +} static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile, char *apolloUrl) { @@ -376,7 +385,9 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, CFG_SCOPE_CLIENT) != 0) return -1; + if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, + CFG_SCOPE_CLIENT) != 0) + return -1; if (cfgAddInt32(pCfg, "metaCacheMaxSize", tsMetaCacheMaxSize, -1, INT32_MAX, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddInt32(pCfg, "slowLogThreshold", tsSlowLogThreshold, 0, INT32_MAX, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddString(pCfg, "slowLogScope", "", CFG_SCOPE_CLIENT) != 0) return -1; @@ -389,7 +400,8 @@ static int32_t 
taosAddClientCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "numOfRpcSessions", tsNumOfRpcSessions, 1, 100000, CFG_SCOPE_BOTH) != 0) return -1; tsTimeToGetAvailableConn = TRANGE(tsTimeToGetAvailableConn, 20, 10000000); - if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, CFG_SCOPE_BOTH) != 0) return -1; + if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, CFG_SCOPE_BOTH) != 0) + return -1; tsNumOfTaskQueueThreads = tsNumOfCores / 2; tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4); @@ -449,7 +461,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "statusInterval", tsStatusInterval, 1, 30, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 1, 1000000, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, CFG_SCOPE_CLIENT) != 0) return -1; - if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, + CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddInt32(pCfg, "countAlwaysReturnValue", tsCountAlwaysReturnValue, 0, 1, CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "queryBufferSize", tsQueryBufferSize, -1, 500000000000, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddBool(pCfg, "printAuth", tsPrintAuth, CFG_SCOPE_SERVER) != 0) return -1; @@ -477,7 +491,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4); if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 4, 1024, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 100, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 100, 
CFG_SCOPE_SERVER) != 0) + return -1; tsNumOfVnodeFetchThreads = tsNumOfCores / 4; tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4); @@ -497,7 +512,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfSnodeStreamThreads = tsNumOfCores / 4; tsNumOfSnodeStreamThreads = TRANGE(tsNumOfSnodeStreamThreads, 2, 4); - if (cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER) != 0) + return -1; tsNumOfSnodeWriteThreads = tsNumOfCores / 4; tsNumOfSnodeWriteThreads = TRANGE(tsNumOfSnodeWriteThreads, 2, 4); @@ -505,14 +521,18 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsRpcQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1; tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL); - if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_BOTH) != 0) + if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, + CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) + return -1; + if (cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) + return -1; - if (cfgAddInt64(pCfg, "vndCommitMaxInterval", tsVndCommitMaxIntervalMs, 1000, 1000 * 60 * 60, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt64(pCfg, 
"vndCommitMaxInterval", tsVndCommitMaxIntervalMs, 1000, 1000 * 60 * 60, CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddInt64(pCfg, "mndSdbWriteDelta", tsMndSdbWriteDelta, 20, 10000, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt64(pCfg, "mndLogRetention", tsMndLogRetention, 500, 10000, CFG_SCOPE_SERVER) != 0) return -1; @@ -542,7 +562,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "queryRsmaTolerance", tsQueryRsmaTolerance, 0, 900000, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt64(pCfg, "walFsyncDataSizeLimit", tsWalFsyncDataSizeLimit, 100 * 1024 * 1024, INT64_MAX, CFG_SCOPE_SERVER) != 0) + if (cfgAddInt64(pCfg, "walFsyncDataSizeLimit", tsWalFsyncDataSizeLimit, 100 * 1024 * 1024, INT64_MAX, + CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddBool(pCfg, "udf", tsStartUdfd, CFG_SCOPE_SERVER) != 0) return -1; @@ -553,13 +574,16 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt64(pCfg, "streamBufferSize", tsStreamBufferSize, 0, INT64_MAX, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt64(pCfg, "checkpointInterval", tsCheckpointInterval, 0, INT64_MAX, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddBool(pCfg, "filterScalarMode", tsFilterScalarMode, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "keepTimeOffset", tsKeepTimeOffset, 0, 23, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER) != 
0) return -1; + GRANT_CFG_ADD; return 0; } @@ -908,7 +932,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN); tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32; - tmqMaxTopicNum= cfgGetItem(pCfg, "tmqMaxTopicNum")->i32; + tmqMaxTopicNum = cfgGetItem(pCfg, "tmqMaxTopicNum")->i32; tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32; tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32; @@ -948,6 +972,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32; tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32; + tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); + GRANT_CFG_GET; return 0; } @@ -1020,7 +1046,7 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { taosSetCoreDump(enableCore); } else if (strcasecmp("enableQueryHb", name) == 0) { tsEnableQueryHb = cfgGetItem(pCfg, "enableQueryHb")->bval; - } else if (strcasecmp("ttlChangeOnWrite", name) == 0) { + } else if (strcasecmp("ttlChangeOnWrite", name) == 0) { tsTtlChangeOnWrite = cfgGetItem(pCfg, "ttlChangeOnWrite")->bval; } break; @@ -1249,9 +1275,9 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { // tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval; // } else if (strcasecmp("smlBatchSize", name) == 0) { // tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32; - } else if(strcasecmp("smlTsDefaultName", name) == 0) { + } else if (strcasecmp("smlTsDefaultName", name) == 0) { tstrncpy(tsSmlTsDefaultName, cfgGetItem(pCfg, "smlTsDefaultName")->str, TSDB_COL_NAME_LEN); - } else if(strcasecmp("smlDot2Underline", name) == 0) { + } else if (strcasecmp("smlDot2Underline", name) == 0) { tsSmlDot2Underline = cfgGetItem(pCfg, "smlDot2Underline")->bval; } else if (strcasecmp("shellActivityTimer", name) == 0) { tsShellActivityTimer = cfgGetItem(pCfg, 
"shellActivityTimer")->i32; @@ -1272,6 +1298,8 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp); snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port); cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype); + } else if (strcasecmp("s3BucketName", name) == 0) { + tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); } else if (strcasecmp("sDebugFlag", name) == 0) { sDebugFlag = cfgGetItem(pCfg, "sDebugFlag")->i32; } else if (strcasecmp("smaDebugFlag", name) == 0) { diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 194ffa16f6..0612f924f5 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -8,6 +8,7 @@ set( "src/vnd/vnodeCommit.c" "src/vnd/vnodeQuery.c" "src/vnd/vnodeModule.c" + "src/vnd/vnodeCos.c" "src/vnd/vnodeSvr.c" "src/vnd/vnodeSync.c" "src/vnd/vnodeSnapshot.c" @@ -134,6 +135,11 @@ else() endif() endif() +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) + target_link_libraries( vnode PUBLIC os @@ -153,6 +159,13 @@ target_link_libraries( PUBLIC transport PUBLIC stream PUBLIC index + + # s3 + cos_c_sdk + ${APR_UTIL_LIBRARY} + ${APR_LIBRARY} + ${MINIXML_LIBRARY} + ${CURL_LIBRARY} ) IF (TD_GRANT) @@ -169,7 +182,20 @@ if(${BUILD_WITH_ROCKSDB}) add_definitions(-DUSE_ROCKSDB) endif(${BUILD_WITH_ROCKSDB}) - +# s3 +FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) +IF (APR_CONFIG_BIN) + EXECUTE_PROCESS( + COMMAND ${APR_CONFIG_BIN} --includedir + OUTPUT_VARIABLE APR_INCLUDE_DIR + OUTPUT_STRIP_TRAILING_WHITESPACE + ) +ENDIF() +include_directories (${APR_INCLUDE_DIR}) +target_include_directories( + vnode + PUBLIC 
"${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" + ) if(${BUILD_TEST}) add_subdirectory(test) diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h new file mode 100644 index 0000000000..b8510213d7 --- /dev/null +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _TD_VND_COS_H_ +#define _TD_VND_COS_H_ + +#include "vnd.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern int8_t tsS3Enabled; + +int32_t s3Init(); +void s3CleanUp(); +void s3PutObjectFromFile(const char *file, const char *object); +void s3DeleteObjects(const char *object_name[], int nobject); + +#ifdef __cplusplus +} +#endif + +#endif /*_TD_VND_COS_H_*/ diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c index a4d5715083..ebe20c0e85 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRetention.c +++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c @@ -15,6 +15,7 @@ #include "tsdb.h" #include "tsdbFS2.h" +#include "vndCos.h" typedef struct { STsdb *tsdb; @@ -41,6 +42,28 @@ static int32_t tsdbDoRemoveFileObject(SRTNer *rtner, const STFileObj *fobj) { return TARRAY2_APPEND(rtner->fopArr, op); } +static int32_t tsdbRemoveFileObjectS3(SRTNer *rtner, const STFileObj *fobj) { + int32_t code = 0, lino = 0; + + STFileOp op = { + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = 
TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + const char *object_name = taosDirEntryBaseName((char *)fobj->fname); + s3DeleteObjects(&object_name, 1); + +_exit: + if (code) { + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + } + return code; +} + static int32_t tsdbDoCopyFile(SRTNer *rtner, const STFileObj *from, const STFile *to) { int32_t code = 0; int32_t lino = 0; @@ -76,6 +99,33 @@ _exit: return code; } +static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile *to) { + int32_t code = 0; + int32_t lino = 0; + + char fname[TSDB_FILENAME_LEN]; + TdFilePtr fdFrom = NULL; + TdFilePtr fdTo = NULL; + + tsdbTFileName(rtner->tsdb, to, fname); + + fdFrom = taosOpenFile(from->fname, TD_FILE_READ); + if (fdFrom == NULL) code = terrno; + TSDB_CHECK_CODE(code, lino, _exit); + + char *object_name = taosDirEntryBaseName(fname); + s3PutObjectFromFile(from->fname, object_name); + + taosCloseFile(&fdFrom); + +_exit: + if (code) { + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + taosCloseFile(&fdFrom); + } + return code; +} + static int32_t tsdbDoMigrateFileObj(SRTNer *rtner, const STFileObj *fobj, const SDiskID *did) { int32_t code = 0; int32_t lino = 0; @@ -123,6 +173,53 @@ _exit: return code; } +static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const SDiskID *did) { + int32_t code = 0; + int32_t lino = 0; + STFileOp op = {0}; + + // remove old + op = (STFileOp){ + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + // create new + op = (STFileOp){ + .optype = TSDB_FOP_CREATE, + .fid = fobj->f->fid, + .nf = + { + .type = fobj->f->type, + .did = did[0], + .fid = fobj->f->fid, + .cid = fobj->f->cid, + .size = fobj->f->size, + .stt[0] = + { + .level = fobj->f->stt[0].level, + }, + }, + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); 
+ + // do copy the file + code = tsdbCopyFileS3(rtner, fobj, &op.nf); + TSDB_CHECK_CODE(code, lino, _exit); + +_exit: + if (code) { + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + } + return code; +} + typedef struct { STsdb *tsdb; int32_t sync; @@ -201,8 +298,14 @@ static int32_t tsdbDoRetention2(void *arg) { for (int32_t ftype = 0; (ftype < TSDB_FTYPE_MAX) && (fobj = rtner->ctx->fset->farr[ftype], 1); ++ftype) { if (fobj == NULL) continue; - code = tsdbDoRemoveFileObject(rtner, fobj); - TSDB_CHECK_CODE(code, lino, _exit); + int32_t nlevel = tfsGetLevel(rtner->tsdb->pVnode->pTfs); + if (tsS3Enabled && nlevel > 1 && TSDB_FTYPE_DATA == ftype && fobj->f->did.level == nlevel - 1) { + code = tsdbRemoveFileObjectS3(rtner, fobj); + TSDB_CHECK_CODE(code, lino, _exit); + } else { + code = tsdbDoRemoveFileObject(rtner, fobj); + TSDB_CHECK_CODE(code, lino, _exit); + } } SSttLvl *lvl; @@ -228,8 +331,15 @@ static int32_t tsdbDoRetention2(void *arg) { if (fobj == NULL) continue; if (fobj->f->did.level == did.level) continue; - code = tsdbDoMigrateFileObj(rtner, fobj, &did); - TSDB_CHECK_CODE(code, lino, _exit); + + int32_t nlevel = tfsGetLevel(rtner->tsdb->pVnode->pTfs); + if (tsS3Enabled && nlevel > 1 && TSDB_FTYPE_DATA == ftype && did.level == nlevel - 1) { + code = tsdbMigrateDataFileS3(rtner, fobj, &did); + TSDB_CHECK_CODE(code, lino, _exit); + } else { + code = tsdbDoMigrateFileObj(rtner, fobj, &did); + TSDB_CHECK_CODE(code, lino, _exit); + } } // stt @@ -281,4 +391,4 @@ int32_t tsdbRetention(STsdb *tsdb, int64_t now, int32_t sync) { tsdbFreeRtnArg(arg); } return code; -} \ No newline at end of file +} diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c new file mode 100644 index 0000000000..1507df7074 --- /dev/null +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -0,0 +1,114 @@ +#define ALLOW_FORBID_FUNC + +#include "vndCos.h" + +#include "cos_api.h" +#include "cos_http_io.h" +#include "cos_log.h" + +extern char 
tsS3Endpoint[]; +extern char tsS3AcessKeyId[]; +extern char tsS3AcessKeySecret[]; +extern char tsS3BucketName[]; +extern char tsS3AppId[]; + +int32_t s3Init() { + if (cos_http_io_initialize(NULL, 0) != COSE_OK) { + return -1; + } + + // set log level, default COS_LOG_WARN + cos_log_set_level(COS_LOG_WARN); + + // set log output, default stderr + cos_log_set_output(NULL); + + return 0; +} + +void s3CleanUp() { cos_http_io_deinitialize(); } + +static void log_status(cos_status_t *s) { + cos_warn_log("status->code: %d", s->code); + if (s->error_code) cos_warn_log("status->error_code: %s", s->error_code); + if (s->error_msg) cos_warn_log("status->error_msg: %s", s->error_msg); + if (s->req_id) cos_warn_log("status->req_id: %s", s->req_id); +} + +static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) { + options->config = cos_config_create(options->pool); + + cos_config_t *config = options->config; + + cos_str_set(&config->endpoint, tsS3Endpoint); + cos_str_set(&config->access_key_id, tsS3AcessKeyId); + cos_str_set(&config->access_key_secret, tsS3AcessKeySecret); + cos_str_set(&config->appid, tsS3AppId); + + config->is_cname = is_cname; + + options->ctl = cos_http_controller_create(options->pool, 0); +} + +void s3PutObjectFromFile(const char *file_str, const char *object_str) { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket, object, file; + cos_table_t *resp_headers; + int traffic_limit = 0; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_table_t *headers = NULL; + if (traffic_limit) { + // 限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + } + cos_str_set(&bucket, tsS3BucketName); + cos_str_set(&file, file_str); + cos_str_set(&object, object_str); + s = 
cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void s3DeleteObjects(const char *object_name[], int nobject) { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_list_t object_list; + cos_list_t deleted_object_list; + int is_quiet = COS_TRUE; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_str_set(&bucket, tsS3BucketName); + + cos_list_init(&object_list); + cos_list_init(&deleted_object_list); + + for (int i = 0; i < nobject; ++i) { + cos_object_key_t *content = cos_create_cos_object_key(p); + cos_str_set(&content->key, object_name[i]); + cos_list_add_tail(&content->node, &object_list); + } + + cos_status_t *s = cos_delete_objects(options, &bucket, &object_list, is_quiet, &resp_headers, &deleted_object_list); + log_status(s); + + cos_pool_destroy(p); + + if (cos_status_is_ok(s)) { + cos_warn_log("delete objects succeeded\n"); + } else { + cos_warn_log("delete objects failed\n"); + } +} diff --git a/source/dnode/vnode/src/vnd/vnodeModule.c b/source/dnode/vnode/src/vnd/vnodeModule.c index 74a8d14a86..6ccce5c9d7 100644 --- a/source/dnode/vnode/src/vnd/vnodeModule.c +++ b/source/dnode/vnode/src/vnd/vnodeModule.c @@ -14,6 +14,7 @@ */ #include "vnd.h" +#include "vndCos.h" typedef struct SVnodeTask SVnodeTask; struct SVnodeTask { @@ -81,6 +82,9 @@ int vnodeInit(int nthreads) { if (tqInit() < 0) { return -1; } + if (s3Init() < 0) { + return -1; + } return 0; } @@ -112,6 +116,7 @@ void vnodeCleanup() { walCleanUp(); tqCleanUp(); smaCleanUp(); + s3CleanUp(); } int vnodeScheduleTaskEx(int tpid, int (*execute)(void*), void* arg) { From ebd09ca532aecdd448dfe6693740270d55150f44 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 4 Aug 2023 13:44:17 +0800 Subject: [PATCH 02/78] tsdb/write: use keep1 as minKey instead of 
keep2 --- source/dnode/vnode/src/tsdb/tsdbWrite.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbWrite.c b/source/dnode/vnode/src/tsdb/tsdbWrite.c index 2dbac956ed..6e89b47adc 100644 --- a/source/dnode/vnode/src/tsdb/tsdbWrite.c +++ b/source/dnode/vnode/src/tsdb/tsdbWrite.c @@ -76,7 +76,7 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq2 *pMsg) { int32_t code = 0; STsdbKeepCfg *pCfg = &pTsdb->keepCfg; TSKEY now = taosGetTimestamp(pCfg->precision); - TSKEY minKey = now - tsTickPerMin[pCfg->precision] * pCfg->keep2; + TSKEY minKey = now - tsTickPerMin[pCfg->precision] * pCfg->keep1; TSKEY maxKey = tsMaxKeyByPrecision[pCfg->precision]; int32_t size = taosArrayGetSize(pMsg->aSubmitTbData); @@ -107,4 +107,4 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq2 *pMsg) { _exit: return code; -} \ No newline at end of file +} From 87783a965082757fb358a5f4ceb8882f6303dfe6 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 4 Aug 2023 15:37:52 +0800 Subject: [PATCH 03/78] s3query/get: pull object to local --- source/dnode/vnode/src/inc/vndCos.h | 3 + .../dnode/vnode/src/tsdb/tsdbReaderWriter.c | 21 +++++- source/dnode/vnode/src/vnd/vnodeCos.c | 75 +++++++++++++++++++ 3 files changed, 96 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h index b8510213d7..d4e19e9031 100644 --- a/source/dnode/vnode/src/inc/vndCos.h +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -28,6 +28,9 @@ int32_t s3Init(); void s3CleanUp(); void s3PutObjectFromFile(const char *file, const char *object); void s3DeleteObjects(const char *object_name[], int nobject); +bool s3Exists(const char *object_name); +void s3Get(const char *object_name, const char *path); +void s3EvictCache(); #ifdef __cplusplus } diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 89b7d019ae..96037ff6be 100644 --- 
a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -14,6 +14,7 @@ */ #include "tsdb.h" +#include "vndCos.h" // =============== PAGE-WISE FILE =============== int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **ppFD) { @@ -34,9 +35,23 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p pFD->flag = flag; pFD->pFD = taosOpenFile(path, flag); if (pFD->pFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - taosMemoryFree(pFD); - goto _exit; + const char *object_name = taosDirEntryBaseName((char *)path); + if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3Exists(object_name)) { + s3EvictCache(); + s3Get(object_name, path); + + pFD->pFD = taosOpenFile(path, flag); + + if (pFD->pFD == NULL) { + code = TAOS_SYSTEM_ERROR(errno); + taosMemoryFree(pFD); + goto _exit; + } + } else { + code = TAOS_SYSTEM_ERROR(errno); + taosMemoryFree(pFD); + goto _exit; + } } pFD->szPage = szPage; pFD->pgno = 0; diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index 1507df7074..696632fc6f 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -112,3 +112,78 @@ void s3DeleteObjects(const char *object_name[], int nobject) { cos_warn_log("delete objects failed\n"); } } + +bool s3Exists(const char *object_name) { + bool ret = false; + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_object_exist_status_e object_exist; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_str_set(&bucket, tsS3BucketName); + cos_str_set(&object, object_name); + + s = cos_check_object_exist(options, &bucket, &object, headers, &object_exist, &resp_headers); + if (object_exist == 
COS_OBJECT_NON_EXIST) { + cos_warn_log("object: %.*s non exist.\n", object.len, object.data); + } else if (object_exist == COS_OBJECT_EXIST) { + ret = true; + cos_warn_log("object: %.*s exist.\n", object.len, object.data); + } else { + cos_warn_log("object: %.*s unknown status.\n", object.len, object.data); + log_status(s); + } + + cos_pool_destroy(p); + + return ret; +} + +void s3Get(const char *object_name, const char *path) { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers = NULL; + cos_table_t *headers = NULL; + int traffic_limit = 0; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_str_set(&bucket, tsS3BucketName); + if (traffic_limit) { + //限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + } + + //下载对象 + cos_str_set(&file, path); + cos_str_set(&object, object_name); + s = cos_get_object_to_file(options, &bucket, &object, headers, NULL, &file, &resp_headers); + if (cos_status_is_ok(s)) { + cos_warn_log("get object succeeded\n"); + } else { + cos_warn_log("get object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void s3EvictCache() {} From 65d8af19ed74f697ac0fef7fb007b6cf67a23f9e Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 4 Aug 2023 16:35:35 +0800 Subject: [PATCH 04/78] s3: new api s3Size --- source/dnode/vnode/src/inc/vndCos.h | 4 +- .../dnode/vnode/src/tsdb/tsdbReaderWriter.c | 3 +- source/dnode/vnode/src/vnd/vnodeCos.c | 45 ++++++++++++++++++- 3 files changed, 48 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h index d4e19e9031..6e0984c400 100644 --- a/source/dnode/vnode/src/inc/vndCos.h +++ 
b/source/dnode/vnode/src/inc/vndCos.h @@ -29,9 +29,9 @@ void s3CleanUp(); void s3PutObjectFromFile(const char *file, const char *object); void s3DeleteObjects(const char *object_name[], int nobject); bool s3Exists(const char *object_name); -void s3Get(const char *object_name, const char *path); +bool s3Get(const char *object_name, const char *path); void s3EvictCache(); - +long s3Size(const char *object_name); #ifdef __cplusplus } #endif diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 96037ff6be..872042d9d5 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -36,7 +36,8 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p pFD->pFD = taosOpenFile(path, flag); if (pFD->pFD == NULL) { const char *object_name = taosDirEntryBaseName((char *)path); - if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3Exists(object_name)) { + long s3_size = s3Size(object_name); + if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3_size > 0) { s3EvictCache(); s3Get(object_name, path); diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index 696632fc6f..a7b166b6c7 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -147,7 +147,8 @@ bool s3Exists(const char *object_name) { return ret; } -void s3Get(const char *object_name, const char *path) { +bool s3Get(const char *object_name, const char *path) { + bool ret = false; cos_pool_t *p = NULL; int is_cname = 0; cos_status_t *s = NULL; @@ -177,6 +178,7 @@ void s3Get(const char *object_name, const char *path) { cos_str_set(&object, object_name); s = cos_get_object_to_file(options, &bucket, &object, headers, NULL, &file, &resp_headers); if (cos_status_is_ok(s)) { + ret = true; cos_warn_log("get object succeeded\n"); } else { cos_warn_log("get object failed\n"); @@ -184,6 +186,47 @@ void 
s3Get(const char *object_name, const char *path) { //销毁内存池 cos_pool_destroy(p); + + return ret; } void s3EvictCache() {} + +long s3Size(const char *object_name) { + long size = 0; + + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_str_set(&bucket, tsS3BucketName); + + //获取对象元数据 + cos_str_set(&object, object_name); + s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); + // print_headers(resp_headers); + if (cos_status_is_ok(s)) { + char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH); + if (content_length_str != NULL) { + size = atol(content_length_str); + } + cos_warn_log("head object succeeded: %ld\n", size); + } else { + cos_warn_log("head object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); + + return size; +} From fac7e521e957ae1aaa279af48c0acd04c646817b Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 7 Aug 2023 15:59:37 +0800 Subject: [PATCH 05/78] s3/evict: fetch atime from stat file --- include/os/osFile.h | 2 +- source/dnode/mgmt/mgmt_mnode/src/mmFile.c | 4 +- source/dnode/mgmt/mgmt_vnode/src/vmFile.c | 2 +- source/dnode/mgmt/node_util/src/dmEps.c | 6 +- source/dnode/mgmt/node_util/src/dmFile.c | 2 +- source/dnode/vnode/src/inc/vndCos.h | 3 +- source/dnode/vnode/src/tq/tqOffsetSnapshot.c | 2 +- source/dnode/vnode/src/tsdb/tsdbFS.c | 10 +- .../dnode/vnode/src/tsdb/tsdbReaderWriter.c | 4 +- source/dnode/vnode/src/vnd/vnodeCos.c | 66 +++++- source/libs/function/src/udfd.c | 8 +- source/libs/index/src/indexFstFile.c | 4 +- source/libs/parser/src/parTranslater.c | 207 +++++++++--------- source/libs/sync/src/syncRaftStore.c | 2 +- source/libs/wal/src/walMeta.c | 12 +- source/os/src/osFile.c | 28 ++- 
source/util/src/tlog.c | 60 ++--- tools/shell/src/shellEngine.c | 100 ++++----- utils/test/c/tmqDemo.c | 3 +- 19 files changed, 300 insertions(+), 225 deletions(-) diff --git a/include/os/osFile.h b/include/os/osFile.h index 0e93002706..da1f8f8b57 100644 --- a/include/os/osFile.h +++ b/include/os/osFile.h @@ -76,7 +76,7 @@ int32_t taosUnLockFile(TdFilePtr pFile); int32_t taosUmaskFile(int32_t maskVal); -int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime); +int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime); int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno); int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime); bool taosCheckExistFile(const char *pathname); diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c index cb0849f4b9..64e18ef06d 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c @@ -46,7 +46,7 @@ static int32_t mmDecodeOption(SJson *pJson, SMnodeOpt *pOption) { if (code < 0) return -1; tjsonGetInt32ValueFromDouble(replica, "role", pOption->nodeRoles[i], code); if (code < 0) return -1; - if(pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER){ + if (pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER) { pOption->numOfReplicas++; } } @@ -65,7 +65,7 @@ int32_t mmReadFile(const char *path, SMnodeOpt *pOption) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%smnode.json", path, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("mnode file:%s not exist", file); return 0; } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index da7f4d4a56..ed32e75d18 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -97,7 +97,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t SWrapperCfg *pCfgs 
= NULL; snprintf(file, sizeof(file), "%s%svnodes.json", pMgmt->path, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("vnode file:%s not exist", file); return 0; } diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c index 1564a09035..88f6b5da40 100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -100,7 +100,7 @@ int32_t dmReadEps(SDnodeData *pData) { goto _OVER; } - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("dnode file:%s not exist", file); code = 0; goto _OVER; @@ -350,7 +350,7 @@ void dmRotateMnodeEpSet(SDnodeData *pData) { } void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet) { - if(!pData->validMnodeEps) return; + if (!pData->validMnodeEps) return; dmGetMnodeEpSet(pData, pEpSet); dTrace("msg is redirected, handle:%p num:%d use:%d", pMsg->info.handle, pEpSet->numOfEps, pEpSet->inUse); for (int32_t i = 0; i < pEpSet->numOfEps; ++i) { @@ -469,7 +469,7 @@ static int32_t dmReadDnodePairs(SDnodeData *pData) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%sdnode%sep.json", tsDataDir, TD_DIRSEP, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dDebug("dnode file:%s not exist", file); code = 0; goto _OVER; diff --git a/source/dnode/mgmt/node_util/src/dmFile.c b/source/dnode/mgmt/node_util/src/dmFile.c index fb05f08c0c..c81efddcc1 100644 --- a/source/dnode/mgmt/node_util/src/dmFile.c +++ b/source/dnode/mgmt/node_util/src/dmFile.c @@ -38,7 +38,7 @@ int32_t dmReadFile(const char *path, const char *name, bool *pDeployed) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%s%s.json", path, TD_DIRSEP, name); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("file:%s not exist", file); code = 0; goto _OVER; 
diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h index 6e0984c400..f6db7f096e 100644 --- a/source/dnode/vnode/src/inc/vndCos.h +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -30,8 +30,9 @@ void s3PutObjectFromFile(const char *file, const char *object); void s3DeleteObjects(const char *object_name[], int nobject); bool s3Exists(const char *object_name); bool s3Get(const char *object_name, const char *path); -void s3EvictCache(); +void s3EvictCache(const char *path, long object_size); long s3Size(const char *object_name); + #ifdef __cplusplus } #endif diff --git a/source/dnode/vnode/src/tq/tqOffsetSnapshot.c b/source/dnode/vnode/src/tq/tqOffsetSnapshot.c index a4428aed43..6a66da30c6 100644 --- a/source/dnode/vnode/src/tq/tqOffsetSnapshot.c +++ b/source/dnode/vnode/src/tq/tqOffsetSnapshot.c @@ -60,7 +60,7 @@ int32_t tqOffsetSnapRead(STqOffsetReader* pReader, uint8_t** ppData) { } int64_t sz = 0; - if (taosStatFile(fname, &sz, NULL) < 0) { + if (taosStatFile(fname, &sz, NULL, NULL) < 0) { taosCloseFile(&pFile); taosMemoryFree(fname); return -1; diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c index ec116c717e..c0c74d6b87 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS.c @@ -176,7 +176,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // SDelFile if (pTsdb->fs.pDelFile) { tsdbDelFileName(pTsdb, pTsdb->fs.pDelFile, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -195,7 +195,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // head ========= tsdbHeadFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pHeadF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -206,7 +206,7 @@ static int32_t 
tsdbScanAndTryFixFS(STsdb *pTsdb) { // data ========= tsdbDataFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pDataF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -221,7 +221,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // sma ============= tsdbSmaFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pSmaF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -237,7 +237,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // stt =========== for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) { tsdbSttFileName(pTsdb, pSet->diskId, pSet->fid, pSet->aSttF[iStt], fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 872042d9d5..4d3b53bc5a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -38,7 +38,7 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p const char *object_name = taosDirEntryBaseName((char *)path); long s3_size = s3Size(object_name); if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3_size > 0) { - s3EvictCache(); + s3EvictCache(path, s3_size); s3Get(object_name, path); pFD->pFD = taosOpenFile(path, flag); @@ -66,7 +66,7 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p // not check file size when reading data files. 
if (flag != TD_FILE_READ) { - if (taosStatFile(path, &pFD->szFile, NULL) < 0) { + if (taosStatFile(path, &pFD->szFile, NULL, NULL) < 0) { code = TAOS_SYSTEM_ERROR(errno); taosMemoryFree(pFD->pBuf); taosCloseFile(&pFD->pFD); diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index a7b166b6c7..bac38f7c35 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -190,7 +190,71 @@ bool s3Get(const char *object_name, const char *path) { return ret; } -void s3EvictCache() {} +typedef struct { + int64_t size; + int32_t atime; + char name[TSDB_FILENAME_LEN]; +} SEvictFile; + +static int32_t evictFileCompareAsce(const void *pLeft, const void *pRight) { + SEvictFile *lhs = (SEvictFile *)pLeft; + SEvictFile *rhs = (SEvictFile *)pRight; + return lhs->atime < rhs->atime ? -1 : 1; +} + +void s3EvictCache(const char *path, long object_size) { + SDiskSize disk_size = {0}; + if (taosGetDiskSize((char *)path, &disk_size) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + vError("failed to get disk:%s size since %s", path, terrstr()); + return; + } + + if (object_size >= disk_size.avail + 1 << 30) { + // evict too old files + // 1, list data files' atime under dir(path) + char dir_name[TSDB_FILENAME_LEN] = "\0"; + tstrncpy(dir_name, path, TSDB_FILENAME_LEN); + taosDirName(dir_name); + + tdbDirPtr pDir = taosOpenDir(dir_name); + if (pDir == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + vError("failed to open %s since %s", dir_name, terrstr()); + } + SArray *evict_files = taosArrayInit(16, sizeof(SEvictFile)); + tdbDirEntryPtr pDirEntry; + while ((pDirEntry = taosReadDir(pDir)) != NULL) { + char *name = taosGetDirEntryName(pDirEntry); + if (!strncmp(name + strlen(name) - 5, ".data", 5)) { + SEvictFile e_file = {0}; + + tstrncpy(e_file.name, name, TSDB_FILENAME_LEN); + taosStatFile(name, &e_file.size, NULL, &e_file.atime); + + taosArrayPush(evict_files, &e_file); + } + } + taosCloseDir(&pDir); + + // 2, sort by 
atime + taosArraySort(evict_files, evictFileCompareAsce); + + // 3, remove files ascendingly until we get enough object_size space + long evict_size = 0; + size_t ef_size = TARRAY_SIZE(evict_files); + for (size_t i = 0; i < ef_size; ++i) { + SEvictFile *evict_file = taosArrayGet(evict_files, i); + taosRemoveFile(evict_file->name); + evict_size += evict_file->size; + if (evict_size >= object_size) { + break; + } + } + + taosArrayDestroy(evict_files); + } +} long s3Size(const char *object_name) { long size = 0; diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index 7371017111..575bce09bb 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -378,9 +378,9 @@ int32_t udfdInitializePythonPlugin(SUdfScriptPlugin *plugin) { "pyUdfDestroy", "pyUdfScalarProc", "pyUdfAggStart", "pyUdfAggFinish", "pyUdfAggProc", "pyUdfAggMerge"}; void **funcs[UDFD_MAX_PLUGIN_FUNCS] = { - (void **)&plugin->openFunc, (void **)&plugin->closeFunc, (void **)&plugin->udfInitFunc, - (void **)&plugin->udfDestroyFunc, (void **)&plugin->udfScalarProcFunc, (void **)&plugin->udfAggStartFunc, - (void **)&plugin->udfAggFinishFunc, (void **)&plugin->udfAggProcFunc, (void **)&plugin->udfAggMergeFunc}; + (void **)&plugin->openFunc, (void **)&plugin->closeFunc, (void **)&plugin->udfInitFunc, + (void **)&plugin->udfDestroyFunc, (void **)&plugin->udfScalarProcFunc, (void **)&plugin->udfAggStartFunc, + (void **)&plugin->udfAggFinishFunc, (void **)&plugin->udfAggProcFunc, (void **)&plugin->udfAggMergeFunc}; int32_t err = udfdLoadSharedLib(plugin->libPath, &plugin->lib, funcName, funcs, UDFD_MAX_PLUGIN_FUNCS); if (err != 0) { fnError("can not load python plugin. 
lib path %s", plugin->libPath); @@ -848,7 +848,7 @@ int32_t udfdSaveFuncBodyToFile(SFuncInfo *pFuncInfo, SUdf *udf) { char path[PATH_MAX] = {0}; udfdGetFuncBodyPath(udf, path); - bool fileExist = !(taosStatFile(path, NULL, NULL) < 0); + bool fileExist = !(taosStatFile(path, NULL, NULL, NULL) < 0); if (fileExist) { strncpy(udf->path, path, PATH_MAX); fnInfo("udfd func body file. reuse existing file %s", path); diff --git a/source/libs/index/src/indexFstFile.c b/source/libs/index/src/indexFstFile.c index e18d0bbad3..43f15f5196 100644 --- a/source/libs/index/src/indexFstFile.c +++ b/source/libs/index/src/indexFstFile.c @@ -162,7 +162,7 @@ static FORCE_INLINE int idxFileCtxGetSize(IFileCtx* ctx) { return ctx->offset; } else { int64_t file_size = 0; - taosStatFile(ctx->file.buf, &file_size, NULL); + taosStatFile(ctx->file.buf, &file_size, NULL, NULL); return (int)file_size; } } @@ -199,7 +199,7 @@ IFileCtx* idxFileCtxCreate(WriterType type, const char* path, bool readOnly, int code = taosFtruncateFile(ctx->file.pFile, 0); UNUSED(code); - code = taosStatFile(path, &ctx->file.size, NULL); + code = taosStatFile(path, &ctx->file.size, NULL, NULL); UNUSED(code); ctx->file.wBufOffset = 0; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 8ce68a5c8c..6845496a03 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -31,8 +31,8 @@ #define SYSTABLE_SHOW_TYPE_OFFSET QUERY_NODE_SHOW_DNODES_STMT typedef struct SRewriteTbNameContext { - int32_t errCode; - char* pTbName; + int32_t errCode; + char* pTbName; } SRewriteTbNameContext; typedef struct STranslateContext { @@ -54,7 +54,7 @@ typedef struct STranslateContext { bool stableQuery; bool showRewrite; SNode* pPrevRoot; - SNode* pPostRoot; + SNode* pPostRoot; } STranslateContext; typedef struct SBuildTopicContext { @@ -278,10 +278,11 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { static int32_t 
translateSubquery(STranslateContext* pCxt, SNode* pNode); static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode); static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal); -static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt); -static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery); -static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery); -static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery); +static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt); +static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery); +static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery); +static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery); static bool afterGroupBy(ESqlClause clause) { return clause > SQL_CLAUSE_GROUP_BY; } @@ -772,7 +773,8 @@ static SNodeList* getProjectList(const SNode* pNode) { static bool isTimeLineQuery(SNode* pStmt) { if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { - return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode); + return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || + (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode); } else if (QUERY_NODE_SET_OPERATOR == nodeType(pStmt)) { return TIME_LINE_GLOBAL == ((SSetOperator*)pStmt)->timeLineResMode; } else { @@ -791,7 +793,7 @@ static bool isGlobalTimeLineQuery(SNode* pStmt) { } static bool isTimeLineAlignedQuery(SNode* pStmt) { - SSelectStmt *pSelect = (SSelectStmt *)pStmt; + SSelectStmt* pSelect = (SSelectStmt*)pStmt; if (isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return true; } @@ -801,7 +803,7 @@ static bool isTimeLineAlignedQuery(SNode* pStmt) { 
if (QUERY_NODE_SELECT_STMT != nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return false; } - SSelectStmt *pSub = (SSelectStmt *)((STempTableNode*)pSelect->pFromTable)->pSubquery; + SSelectStmt* pSub = (SSelectStmt*)((STempTableNode*)pSelect->pFromTable)->pSubquery; if (nodesListMatch(pSelect->pPartitionByList, pSub->pPartitionByList)) { return true; } @@ -1394,7 +1396,7 @@ static bool isCountStar(SFunctionNode* pFunc) { } static int32_t rewriteCountStarAsCount1(STranslateContext* pCxt, SFunctionNode* pCount) { - int32_t code = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); if (NULL == pVal) { return TSDB_CODE_OUT_OF_MEMORY; @@ -1596,9 +1598,11 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } - if (pSelect->hasInterpFunc && (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { + if (pSelect->hasInterpFunc && + (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, - "%s ignoring null value options cannot be used when applying to multiple columns", pFunc->functionName); + "%s ignoring null value options cannot be used when applying to multiple columns", + pFunc->functionName); } if (NULL != pSelect->pWindow || NULL != pSelect->pGroupByList) { @@ -1636,7 +1640,8 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu } SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && - !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && !isTimeLineAlignedQuery(pCxt->pCurrStmt)) { + 
!isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && + !isTimeLineAlignedQuery(pCxt->pCurrStmt)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, "%s function requires valid time series input", pFunc->functionName); } @@ -1706,8 +1711,8 @@ static int32_t translateForbidSysTableFunc(STranslateContext* pCxt, SFunctionNod return TSDB_CODE_SUCCESS; } - SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; - SNode* pTable = pSelect->pFromTable; + SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; + SNode* pTable = pSelect->pFromTable; if (NULL != pTable && QUERY_NODE_REAL_TABLE == nodeType(pTable) && TSDB_SYSTEM_TABLE == ((SRealTableNode*)pTable)->pMeta->tableType) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, pFunc->functionName); @@ -2296,7 +2301,8 @@ static EDealRes doCheckExprForGroupBy(SNode** pNode, void* pContext) { } } if (isScanPseudoColumnFunc(*pNode) || QUERY_NODE_COLUMN == nodeType(*pNode)) { - if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { + if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || + (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { return generateDealNodeErrMsg(pCxt, getGroupByErrorCode(pCxt), ((SExprNode*)(*pNode))->userAlias); } else { return rewriteColToSelectValFunc(pCxt, pNode); @@ -2391,14 +2397,14 @@ static int32_t checkHavingGroupBy(STranslateContext* pCxt, SSelectStmt* pSelect) if (NULL != pSelect->pHaving) { code = checkExprForGroupBy(pCxt, &pSelect->pHaving); } -/* - if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pProjectionList) { - code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); - } - if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { - code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pOrderByList); - } -*/ + /* 
+ if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pProjectionList) { + code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); + } + if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { + code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pOrderByList); + } + */ return code; } @@ -2657,10 +2663,10 @@ static int32_t setTableCacheLastMode(STranslateContext* pCxt, SSelectStmt* pSele static EDealRes doTranslateTbName(SNode** pNode, void* pContext) { switch (nodeType(*pNode)) { case QUERY_NODE_FUNCTION: { - SFunctionNode *pFunc = (SFunctionNode *)*pNode; + SFunctionNode* pFunc = (SFunctionNode*)*pNode; if (FUNCTION_TYPE_TBNAME == pFunc->funcType) { - SRewriteTbNameContext *pCxt = (SRewriteTbNameContext*)pContext; - SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); + SRewriteTbNameContext* pCxt = (SRewriteTbNameContext*)pContext; + SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); if (NULL == pVal) { pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; return DEAL_RES_ERROR; @@ -2699,11 +2705,12 @@ static int32_t replaceTbName(STranslateContext* pCxt, SSelectStmt* pSelect) { } SRealTableNode* pTable = (SRealTableNode*)pSelect->pFromTable; - if (TSDB_CHILD_TABLE != pTable->pMeta->tableType && TSDB_NORMAL_TABLE != pTable->pMeta->tableType && TSDB_SYSTEM_TABLE != pTable->pMeta->tableType) { + if (TSDB_CHILD_TABLE != pTable->pMeta->tableType && TSDB_NORMAL_TABLE != pTable->pMeta->tableType && + TSDB_SYSTEM_TABLE != pTable->pMeta->tableType) { return TSDB_CODE_SUCCESS; } - SNode** pNode = NULL; + SNode** pNode = NULL; SRewriteTbNameContext pRewriteCxt = {0}; pRewriteCxt.pTbName = pTable->table.tableName; @@ -3110,7 +3117,8 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList code = scalarCalculateConstants(pCastFunc, &pCell->pNode); } if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) { - code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, 
"Fill value can only accept constant"); + code = + generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant"); } else if (TSDB_CODE_SUCCESS != code) { code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch"); } @@ -3576,7 +3584,6 @@ static int32_t createDefaultEveryNode(STranslateContext* pCxt, SNode** pOutput) pEvery->isDuration = true; pEvery->literal = taosStrdup("1s"); - *pOutput = (SNode*)pEvery; return TSDB_CODE_SUCCESS; } @@ -3671,15 +3678,15 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) { static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelect) { pCxt->currClause = SQL_CLAUSE_PARTITION_BY; int32_t code = TSDB_CODE_SUCCESS; - + if (pSelect->pPartitionByList) { int8_t typeType = getTableTypeFromTableNode(pSelect->pFromTable); SNode* pPar = nodesListGetNode(pSelect->pPartitionByList, 0); - if (!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) && - 1 == pSelect->pPartitionByList->length && (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { + if (!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) && 1 == pSelect->pPartitionByList->length && + (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { pSelect->timeLineResMode = TIME_LINE_MULTI; } - + code = translateExprList(pCxt, pSelect->pPartitionByList); } if (TSDB_CODE_SUCCESS == code) { @@ -3943,9 +3950,9 @@ static int32_t translateSetOperProject(STranslateContext* pCxt, SSetOperator* pS } snprintf(pRightExpr->aliasName, sizeof(pRightExpr->aliasName), "%s", pLeftExpr->aliasName); SNode* pProj = createSetOperProject(pSetOperator->stmtName, pLeft); - if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight) - && ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID - && 
((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { + if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight) && + ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID && + ((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { ((SColumnNode*)pProj)->colId = PRIMARYKEY_TIMESTAMP_COL_ID; } if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pSetOperator->pProjectionList, pProj)) { @@ -5725,7 +5732,6 @@ static int32_t translateRestoreDnode(STranslateContext* pCxt, SRestoreComponentN return buildCmdMsg(pCxt, TDMT_MND_RESTORE_DNODE, (FSerializeFunc)tSerializeSRestoreDnodeReq, &restoreReq); } - static int32_t getSmaIndexDstVgId(STranslateContext* pCxt, const char* pDbName, const char* pTableName, int32_t* pVgId) { SVgroupInfo vg = {0}; @@ -5853,7 +5859,7 @@ static int32_t checkCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pS } static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) { - int32_t code = checkCreateSmaIndex(pCxt, pStmt); + int32_t code = checkCreateSmaIndex(pCxt, pStmt); pStmt->pReq = taosMemoryCalloc(1, sizeof(SMCreateSmaReq)); if (pStmt->pReq == NULL) code = TSDB_CODE_OUT_OF_MEMORY; if (TSDB_CODE_SUCCESS == code) { @@ -5867,13 +5873,15 @@ int32_t createIntervalFromCreateSmaIndexStmt(SCreateIndexStmt* pStmt, SInterval* pInterval->interval = ((SValueNode*)pStmt->pOptions->pInterval)->datum.i; pInterval->intervalUnit = ((SValueNode*)pStmt->pOptions->pInterval)->unit; pInterval->offset = NULL != pStmt->pOptions->pOffset ? ((SValueNode*)pStmt->pOptions->pOffset)->datum.i : 0; - pInterval->sliding = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval; - pInterval->slidingUnit = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit; + pInterval->sliding = + NULL != pStmt->pOptions->pSliding ? 
((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval; + pInterval->slidingUnit = + NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit; pInterval->precision = pStmt->pOptions->tsPrecision; return TSDB_CODE_SUCCESS; } -int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void ** pResRow) { +int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) { int32_t code = TSDB_CODE_SUCCESS; SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pQuery->pRoot; int64_t lastTs = 0; @@ -6041,7 +6049,7 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS toName(pCxt->pParseCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name); tNameGetFullDbName(&name, pReq->subDbName); tNameExtractFullName(&name, pReq->subStbName); - if(pStmt->pQuery != NULL) { + if (pStmt->pQuery != NULL) { code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL); } } else if ('\0' != pStmt->subDbName[0]) { @@ -6096,11 +6104,12 @@ static EDealRes checkColumnTagsInCond(SNode* pNode, void* pContext) { addTagList(&pCxt->pTags, nodesCloneNode(pNode)); } } - + return DEAL_RES_CONTINUE; } -static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, SNodeList** ppProjection) { +static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, + SNodeList** ppProjection) { SBuildTopicContext colCxt = {.colExists = false, .colNotFound = false, .pMeta = pMeta, .pTags = NULL}; nodesWalkExprPostOrder(pStmt->pWhere, checkColumnTagsInCond, &colCxt); if (colCxt.colNotFound) { @@ -6110,18 +6119,18 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* nodesDestroyList(colCxt.pTags); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Columns are forbidden in where clause"); } - if (NULL == colCxt.pTags) { // put one column to 
select -// for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { - SSchema* column = &pMeta->schema[0]; - SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); - if (NULL == col) { - return TSDB_CODE_OUT_OF_MEMORY; - } - strcpy(col->colName, column->name); - strcpy(col->node.aliasName, col->colName); - strcpy(col->node.userAlias, col->colName); - addTagList(&colCxt.pTags, (SNode*)col); -// } + if (NULL == colCxt.pTags) { // put one column to select + // for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { + SSchema* column = &pMeta->schema[0]; + SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == col) { + return TSDB_CODE_OUT_OF_MEMORY; + } + strcpy(col->colName, column->name); + strcpy(col->node.aliasName, col->colName); + strcpy(col->node.userAlias, col->colName); + addTagList(&colCxt.pTags, (SNode*)col); + // } } *ppProjection = colCxt.pTags; @@ -6129,13 +6138,13 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* } static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt* pStmt, SNode** pSelect) { - SParseContext* pParCxt = pCxt->pParseCxt; - SRequestConnInfo connInfo = {.pTrans = pParCxt->pTransporter, - .requestId = pParCxt->requestId, + SParseContext* pParCxt = pCxt->pParseCxt; + SRequestConnInfo connInfo = {.pTrans = pParCxt->pTransporter, + .requestId = pParCxt->requestId, .requestObjRefId = pParCxt->requestRid, .mgmtEps = pParCxt->mgmtEpSet}; - SName name; - STableMeta* pMeta = NULL; + SName name; + STableMeta* pMeta = NULL; int32_t code = getTableMetaImpl(pCxt, toName(pParCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name), &pMeta); if (code) { taosMemoryFree(pMeta); @@ -6144,7 +6153,7 @@ static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt if (TSDB_SUPER_TABLE != pMeta->tableType) { taosMemoryFree(pMeta); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Only supertable table can be 
used"); - } + } SNodeList* pProjection = NULL; code = checkCollectTopicTags(pCxt, pStmt, pMeta, &pProjection); @@ -6542,7 +6551,8 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "SUBTABLE expression must be of VARCHAR type"); } - if (NULL != pSelect->pSubtable && 0 == LIST_LENGTH(pSelect->pPartitionByList) && subtableExprHasColumnOrPseudoColumn(pSelect->pSubtable)) { + if (NULL != pSelect->pSubtable && 0 == LIST_LENGTH(pSelect->pPartitionByList) && + subtableExprHasColumnOrPseudoColumn(pSelect->pSubtable)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "SUBTABLE expression must not has column when no partition by clause"); } @@ -6895,28 +6905,28 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta if (NULL == col) { return TSDB_CODE_OUT_OF_MEMORY; } - + strcpy(col->tableAlias, pTable); strcpy(col->colName, pMeta->schema[0].name); SNodeList* pParamterList = nodesMakeList(); if (NULL == pParamterList) { - nodesDestroyNode((SNode *)col); + nodesDestroyNode((SNode*)col); return TSDB_CODE_OUT_OF_MEMORY; } - - int32_t code = nodesListStrictAppend(pParamterList, (SNode *)col); + + int32_t code = nodesListStrictAppend(pParamterList, (SNode*)col); if (code) { - nodesDestroyNode((SNode *)col); + nodesDestroyNode((SNode*)col); nodesDestroyList(pParamterList); return code; } - + SNode* pFunc = (SNode*)createFunction("last", pParamterList); if (NULL == pFunc) { nodesDestroyList(pParamterList); return TSDB_CODE_OUT_OF_MEMORY; } - + SNodeList* pProjectionList = nodesMakeList(); if (NULL == pProjectionList) { nodesDestroyList(pParamterList); @@ -6928,8 +6938,8 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta nodesDestroyList(pProjectionList); return code; } - - code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt **)pQuery); + + 
code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt**)pQuery); if (code) { nodesDestroyList(pProjectionList); return code; @@ -6967,14 +6977,14 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt if (TSDB_CODE_SUCCESS == code && pStmt->pOptions->fillHistory) { SRealTableNode* pTable = (SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable); code = createLastTsSelectStmt(pTable->table.dbName, pTable->table.tableName, pTable->pMeta, &pStmt->pPrevQuery); -/* - if (TSDB_CODE_SUCCESS == code) { - STranslateContext cxt = {0}; - int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); - code = translateQuery(&cxt, pStmt->pPrevQuery); - destroyTranslateContext(&cxt); - } -*/ + /* + if (TSDB_CODE_SUCCESS == code) { + STranslateContext cxt = {0}; + int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); + code = translateQuery(&cxt, pStmt->pPrevQuery); + destroyTranslateContext(&cxt); + } + */ } taosMemoryFree(pMeta); return code; @@ -7069,7 +7079,7 @@ static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow)) { return code; } - + SIntervalWindowNode* pWindow = (SIntervalWindowNode*)pSelect->pWindow; pInterval->interval = ((SValueNode*)pWindow->pInterval)->datum.i; pInterval->intervalUnit = ((SValueNode*)pWindow->pInterval)->unit; @@ -7077,16 +7087,16 @@ static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* pInterval->sliding = (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->datum.i : pInterval->interval); pInterval->slidingUnit = (NULL != pWindow->pSliding ? 
((SValueNode*)pWindow->pSliding)->unit : pInterval->intervalUnit); - pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision; + pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision; return code; } int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) { SCreateStreamStmt* pStmt = (SCreateStreamStmt*)pQuery->pRoot; - STranslateContext cxt = {0}; - SInterval interval = {0}; - int64_t lastTs = 0; + STranslateContext cxt = {0}; + SInterval interval = {0}; + int64_t lastTs = 0; int32_t code = initTranslateContext(pParseCxt, NULL, &cxt); if (TSDB_CODE_SUCCESS == code) { @@ -7121,7 +7131,6 @@ int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void return code; } - static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pStmt) { SMDropStreamReq dropReq = {0}; SName name; @@ -7152,7 +7161,7 @@ static int32_t translateResumeStream(STranslateContext* pCxt, SResumeStreamStmt* static int32_t readFromFile(char* pName, int32_t* len, char** buf) { int64_t filesize = 0; - if (taosStatFile(pName, &filesize, NULL) < 0) { + if (taosStatFile(pName, &filesize, NULL, NULL) < 0) { return TAOS_SYSTEM_ERROR(errno); } @@ -7246,7 +7255,7 @@ static int32_t translateGrantTagCond(STranslateContext* pCxt, SGrantStmt* pStmt, } } - int32_t code = createRealTableForGrantTable(pStmt, &pTable); + int32_t code = createRealTableForGrantTable(pStmt, &pTable); if (TSDB_CODE_SUCCESS == code) { SName name; code = getTableMetaImpl(pCxt, toName(pCxt->pParseCxt->acctId, pTable->table.dbName, pTable->table.tableName, &name), @@ -7806,7 +7815,8 @@ static SNodeList* createProjectCols(int32_t ncols, const char* const pCols[]) { return pProjections; } -static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { +static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, + 
SSelectStmt** pStmt) { SSelectStmt* pSelect = (SSelectStmt*)nodesMakeNode(QUERY_NODE_SELECT_STMT); if (NULL == pSelect) { return TSDB_CODE_OUT_OF_MEMORY; @@ -7829,9 +7839,8 @@ static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, S return TSDB_CODE_SUCCESS; } - static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTable, int32_t numOfProjs, - const char* const pProjCol[], SSelectStmt** pStmt) { + const char* const pProjCol[], SSelectStmt** pStmt) { SNodeList* pProjectionList = NULL; if (numOfProjs >= 0) { pProjectionList = createProjectCols(numOfProjs, pProjCol); @@ -7843,13 +7852,15 @@ static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTabl return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt); } -static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { +static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt) { return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt); } static int32_t createSelectStmtForShow(ENodeType showType, SSelectStmt** pStmt) { const SSysTableShowAdapter* pShow = &sysTableShowAdapter[showType - SYSTABLE_SHOW_TYPE_OFFSET]; - return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, pStmt); + return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, + pStmt); } static int32_t createSelectStmtForShowTableDist(SShowTableDistributedStmt* pStmt, SSelectStmt** pOutput) { @@ -7987,8 +7998,8 @@ static int32_t createShowTableTagsProjections(SNodeList** pProjections, SNodeLis static int32_t rewriteShowStableTags(STranslateContext* pCxt, SQuery* pQuery) { SShowTableTagsStmt* pShow = (SShowTableTagsStmt*)pQuery->pRoot; SSelectStmt* pSelect = NULL; - int32_t code = 
createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal, - -1, NULL, &pSelect); + int32_t code = createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, + ((SValueNode*)pShow->pTbName)->literal, -1, NULL, &pSelect); if (TSDB_CODE_SUCCESS == code) { code = createShowTableTagsProjections(&pSelect->pProjectionList, &pShow->pTags); } diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c index bd15567c87..051106b99d 100644 --- a/source/libs/sync/src/syncRaftStore.c +++ b/source/libs/sync/src/syncRaftStore.c @@ -42,7 +42,7 @@ int32_t raftStoreReadFile(SSyncNode *pNode) { const char *file = pNode->raftStorePath; SRaftStore *pStore = &pNode->raftStore; - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { sInfo("vgId:%d, raft store file:%s not exist, use default value", pNode->vgId, file); pStore->currentTerm = 0; pStore->voteFor.addr = 0; diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 01d23a7e96..2acdd975e5 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -53,7 +53,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); TdFilePtr pFile = taosOpenFile(fnameStr, TD_FILE_READ | TD_FILE_WRITE); if (pFile == NULL) { @@ -304,7 +304,7 @@ int walRepairLogFileTs(SWal* pWal, bool* updateMeta) { walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); int32_t mtime = 0; - if (taosStatFile(fnameStr, NULL, &mtime) < 0) { + if (taosStatFile(fnameStr, NULL, &mtime, NULL) < 0) { terrno = TAOS_SYSTEM_ERROR(errno); wError("vgId:%d, failed to stat file due to %s, file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); return -1; @@ -353,7 +353,7 @@ int walTrimIdxFile(SWal* pWal, int32_t fileIdx) 
{ walBuildIdxName(pWal, pFileInfo->firstVer, fnameStr); int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); int64_t records = TMAX(0, pFileInfo->lastVer - pFileInfo->firstVer + 1); int64_t lastEndOffset = records * sizeof(SWalIdxEntry); @@ -436,7 +436,7 @@ int walCheckAndRepairMeta(SWal* pWal) { SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, fileIdx); walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); - int32_t code = taosStatFile(fnameStr, &fileSize, NULL); + int32_t code = taosStatFile(fnameStr, &fileSize, NULL, NULL); if (code < 0) { terrno = TAOS_SYSTEM_ERROR(errno); wError("failed to stat file since %s. file:%s", terrstr(), fnameStr); @@ -522,7 +522,7 @@ int walCheckAndRepairIdxFile(SWal* pWal, int32_t fileIdx) { walBuildLogName(pWal, pFileInfo->firstVer, fLogNameStr); int64_t fileSize = 0; - if (taosStatFile(fnameStr, &fileSize, NULL) < 0 && errno != ENOENT) { + if (taosStatFile(fnameStr, &fileSize, NULL, NULL) < 0 && errno != ENOENT) { wError("vgId:%d, failed to stat file due to %s. 
file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); terrno = TAOS_SYSTEM_ERROR(errno); return -1; @@ -935,7 +935,7 @@ int walLoadMeta(SWal* pWal) { walBuildMetaName(pWal, metaVer, fnameStr); // read metafile int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); if (fileSize == 0) { (void)taosRemoveFile(fnameStr); wDebug("vgId:%d, wal find empty meta ver %d", pWal->cfg.vgId, metaVer); diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index dd670595f0..c4309b2c55 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -191,7 +191,7 @@ int32_t taosRenameFile(const char *oldName, const char *newName) { #endif } -int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { +int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime) { #ifdef WINDOWS struct _stati64 fileStat; int32_t code = _stati64(path, &fileStat); @@ -211,6 +211,10 @@ int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { *mtime = fileStat.st_mtime; } + if (atime != NULL) { + *atime = fileStat.st_mtime; + } + return 0; } int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno) { @@ -540,7 +544,7 @@ int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime) { #ifdef WINDOWS struct __stat64 fileStat; - int32_t code = _fstat64(pFile->fd, &fileStat); + int32_t code = _fstat64(pFile->fd, &fileStat); #else struct stat fileStat; int32_t code = fstat(pFile->fd, &fileStat); @@ -897,17 +901,17 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName) { goto cmp_end; } - dstFp = gzdopen(pFile->fd, "wb6f"); - if (dstFp == NULL) { - ret = -3; - taosCloseFile(&pFile); - goto cmp_end; - } + dstFp = gzdopen(pFile->fd, "wb6f"); + if (dstFp == NULL) { + ret = -3; + taosCloseFile(&pFile); + goto cmp_end; + } - while (!feof(pSrcFile->fp)) { - len = (int32_t)fread(data, 1, compressSize, pSrcFile->fp); - (void)gzwrite(dstFp, data, len); - } + while 
(!feof(pSrcFile->fp)) { + len = (int32_t)fread(data, 1, compressSize, pSrcFile->fp); + (void)gzwrite(dstFp, data, len); + } cmp_end: if (pSrcFile) { diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index de7ad848ed..4a15b5b976 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -17,8 +17,8 @@ #include "tlog.h" #include "os.h" #include "tconfig.h" -#include "tjson.h" #include "tglobal.h" +#include "tjson.h" #define LOG_MAX_LINE_SIZE (10024) #define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3) @@ -74,12 +74,12 @@ static SLogObj tsLogObj = {.fileNum = 1}; static int64_t tsAsyncLogLostLines = 0; static int32_t tsDaylightActive; /* Currently in daylight saving time. */ -bool tsLogEmbedded = 0; -bool tsAsyncLog = true; +bool tsLogEmbedded = 0; +bool tsAsyncLog = true; #ifdef ASSERT_NOT_CORE -bool tsAssert = false; +bool tsAssert = false; #else -bool tsAssert = true; +bool tsAssert = true; #endif int32_t tsNumOfLogLines = 10000000; int32_t tsLogKeepDays = 0; @@ -160,7 +160,7 @@ int32_t taosInitSlowLog() { tsLogObj.slowHandle = taosLogBuffNew(LOG_SLOW_BUF_SIZE); if (tsLogObj.slowHandle == NULL) return -1; - + taosUmaskFile(0); tsLogObj.slowHandle->pFile = taosOpenFile(fullName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); if (tsLogObj.slowHandle->pFile == NULL) { @@ -403,13 +403,13 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { strcpy(name, fn); strcat(name, ".0"); } - bool log0Exist = taosStatFile(name, NULL, &logstat0_mtime) >= 0; + bool log0Exist = taosStatFile(name, NULL, &logstat0_mtime, NULL) >= 0; if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { strcpy(name, fn); strcat(name, ".1"); } - bool log1Exist = taosStatFile(name, NULL, &logstat1_mtime) >= 0; + bool log1Exist = taosStatFile(name, NULL, &logstat1_mtime, NULL) >= 0; // if none of the log files exist, open 0, if both exists, open the old one if (!log0Exist && !log1Exist) { @@ -576,7 +576,7 @@ void taosPrintSlowLog(const char *format, ...) 
{ } else { taosWriteFile(tsLogObj.slowHandle->pFile, buffer, len); } - + taosMemoryFree(buffer); } @@ -769,12 +769,12 @@ static void taosWriteLog(SLogBuff *pLogBuf) { static void *taosAsyncOutputLog(void *param) { SLogBuff *pLogBuf = (SLogBuff *)tsLogObj.logHandle; SLogBuff *pSlowBuf = (SLogBuff *)tsLogObj.slowHandle; - + setThreadName("log"); int32_t count = 0; int32_t updateCron = 0; int32_t writeInterval = 0; - + while (1) { writeInterval = TMIN(pLogBuf->writeInterval, pSlowBuf->writeInterval); count += writeInterval; @@ -834,12 +834,12 @@ bool taosAssertDebug(bool condition, const char *file, int32_t line, const char return true; } -void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, void *sigInfo) { +void taosLogCrashInfo(char *nodeType, char *pMsg, int64_t msgLen, int signum, void *sigInfo) { const char *flags = "UTL FATAL "; ELogLevel level = DEBUG_FATAL; int32_t dflag = 255; - char filepath[PATH_MAX] = {0}; - TdFilePtr pFile = NULL; + char filepath[PATH_MAX] = {0}; + TdFilePtr pFile = NULL; if (pMsg && msgLen > 0) { snprintf(filepath, sizeof(filepath), "%s%s.%sCrashLog", tsLogDir, TD_DIRSEP, nodeType); @@ -856,16 +856,16 @@ void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, vo int64_t writeSize = taosWriteFile(pFile, &msgLen, sizeof(msgLen)); if (sizeof(msgLen) != writeSize) { taosUnLockFile(pFile); - taosPrintLog(flags, level, dflag, "failed to write len to file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", - filepath, pFile, writeSize, sizeof(msgLen), terrstr()); + taosPrintLog(flags, level, dflag, "failed to write len to file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", + filepath, pFile, writeSize, sizeof(msgLen), terrstr()); goto _return; } writeSize = taosWriteFile(pFile, pMsg, msgLen); if (msgLen != writeSize) { taosUnLockFile(pFile); - taosPrintLog(flags, level, dflag, "failed to write file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", - filepath, pFile, writeSize, msgLen, terrstr()); + 
taosPrintLog(flags, level, dflag, "failed to write file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", + filepath, pFile, writeSize, msgLen, terrstr()); goto _return; } @@ -883,7 +883,7 @@ _return: taosPrintTrace(flags, level, dflag, 4); #elif !defined(WINDOWS) taosPrintLog(flags, level, dflag, "sender PID:%d cmdline:%s", ((siginfo_t *)sigInfo)->si_pid, - taosGetCmdlineByPID(((siginfo_t *)sigInfo)->si_pid)); + taosGetCmdlineByPID(((siginfo_t *)sigInfo)->si_pid)); taosPrintTrace(flags, level, dflag, 3); #else taosPrintTrace(flags, level, dflag, 8); @@ -892,17 +892,17 @@ _return: taosMemoryFree(pMsg); } -void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* pFd) { +void taosReadCrashInfo(char *filepath, char **pMsg, int64_t *pMsgLen, TdFilePtr *pFd) { const char *flags = "UTL FATAL "; ELogLevel level = DEBUG_FATAL; int32_t dflag = 255; TdFilePtr pFile = NULL; bool truncateFile = false; - char* buf = NULL; + char *buf = NULL; if (NULL == *pFd) { int64_t filesize = 0; - if (taosStatFile(filepath, &filesize, NULL) < 0) { + if (taosStatFile(filepath, &filesize, NULL, NULL) < 0) { if (ENOENT == errno) { return; } @@ -916,7 +916,7 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* return; } - pFile = taosOpenFile(filepath, TD_FILE_READ|TD_FILE_WRITE); + pFile = taosOpenFile(filepath, TD_FILE_READ | TD_FILE_WRITE); if (pFile == NULL) { if (ENOENT == errno) { return; @@ -926,7 +926,7 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* taosPrintLog(flags, level, dflag, "failed to open file:%s since %s", filepath, terrstr()); return; } - + taosLockFile(pFile); } else { pFile = *pFd; @@ -937,8 +937,8 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* if (sizeof(msgLen) != readSize) { truncateFile = true; if (readSize < 0) { - taosPrintLog(flags, level, dflag, "failed to read len from file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", - filepath, pFile, 
readSize, sizeof(msgLen), terrstr()); + taosPrintLog(flags, level, dflag, "failed to read len from file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", + filepath, pFile, readSize, sizeof(msgLen), terrstr()); } goto _return; } @@ -948,12 +948,12 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* taosPrintLog(flags, level, dflag, "failed to malloc buf, size:%" PRId64, msgLen); goto _return; } - + readSize = taosReadFile(pFile, buf, msgLen); if (msgLen != readSize) { truncateFile = true; - taosPrintLog(flags, level, dflag, "failed to read file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", - filepath, pFile, readSize, msgLen, terrstr()); + taosPrintLog(flags, level, dflag, "failed to read file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", filepath, + pFile, readSize, msgLen, terrstr()); goto _return; } @@ -981,7 +981,7 @@ void taosReleaseCrashLogFile(TdFilePtr pFile, bool truncateFile) { if (truncateFile) { taosFtruncateFile(pFile, 0); } - + taosUnLockFile(pFile); taosCloseFile(&pFile); } diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index e9dd067ac4..860622ea18 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -18,9 +18,9 @@ #define _GNU_SOURCE #define _XOPEN_SOURCE #define _DEFAULT_SOURCE -#include "shellInt.h" -#include "shellAuto.h" #include "geosWrapper.h" +#include "shellAuto.h" +#include "shellInt.h" static bool shellIsEmptyCommand(const char *cmd); static int32_t shellRunSingleCommand(char *command); @@ -41,9 +41,9 @@ static bool shellIsCommentLine(char *line); static void shellSourceFile(const char *file); static void shellGetGrantInfo(); -static void shellCleanup(void *arg); -static void *shellCancelHandler(void *arg); -static void *shellThreadLoop(void *arg); +static void shellCleanup(void *arg); +static void *shellCancelHandler(void *arg); +static void *shellThreadLoop(void *arg); bool shellIsEmptyCommand(const char *cmd) { for (char c = *cmd++; c != 0; c = 
*cmd++) { @@ -66,7 +66,7 @@ int32_t shellRunSingleCommand(char *command) { if (shellRegexMatch(command, "^[\t ]*clear[ \t;]*$", REG_EXTENDED | REG_ICASE)) { #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-result" - system("clear"); + system("clear"); #pragma GCC diagnostic pop return 0; } @@ -142,8 +142,8 @@ int32_t shellRunCommand(char *command, bool recordHistory) { return 0; } - // add help or help; - if(strncasecmp(command, "help;", 5) == 0) { + // add help or help; + if (strncasecmp(command, "help;", 5) == 0) { showHelp(); return 0; } @@ -223,14 +223,14 @@ void shellRunSingleCommandImp(char *command) { } // pre string - char * pre = "Query OK"; + char *pre = "Query OK"; if (shellRegexMatch(command, "^\\s*delete\\s*from\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Delete OK"; - } else if(shellRegexMatch(command, "^\\s*insert\\s*into\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*insert\\s*into\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Insert OK"; - } else if(shellRegexMatch(command, "^\\s*create\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*create\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Create OK"; - } else if(shellRegexMatch(command, "^\\s*drop\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*drop\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Drop OK"; } @@ -295,7 +295,7 @@ char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) { if (taosLocalTime(&tt, &ptm, buf) == NULL) { return buf; } - size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm); + size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm); if (precision == TSDB_TIME_PRECISION_NANO) { sprintf(buf + pos, ".%09d", ms); @@ -387,22 +387,20 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: - case TSDB_DATA_TYPE_JSON: - { - int32_t bufIndex = 0; - for (int32_t i = 0; 
i < length; i++) { + case TSDB_DATA_TYPE_JSON: { + int32_t bufIndex = 0; + for (int32_t i = 0; i < length; i++) { + buf[bufIndex] = val[i]; + bufIndex++; + if (val[i] == '\"') { buf[bufIndex] = val[i]; bufIndex++; - if (val[i] == '\"') { - buf[bufIndex] = val[i]; - bufIndex++; - } } - buf[bufIndex] = 0; - - taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); } - break; + buf[bufIndex] = 0; + + taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); + } break; case TSDB_DATA_TYPE_GEOMETRY: shellDumpHexValue(buf, val, length); taosFprintfFile(pFile, "%s", buf); @@ -535,12 +533,10 @@ void shellPrintString(const char *str, int32_t width) { if (width == 0) { printf("%s", str); - } - else if (len > width) { + } else if (len > width) { if (width <= 3) { printf("%.*s.", width - 1, str); - } - else { + } else { printf("%.*s...", width - 3, str); } } else { @@ -549,7 +545,7 @@ void shellPrintString(const char *str, int32_t width) { } void shellPrintGeometry(const unsigned char *val, int32_t length, int32_t width) { - if (length == 0) { //empty value + if (length == 0) { // empty value shellPrintString("", width); return; } @@ -565,7 +561,7 @@ void shellPrintGeometry(const unsigned char *val, int32_t length, int32_t width) char *outputWKT = NULL; code = doAsText(val, length, &outputWKT); if (code != TSDB_CODE_SUCCESS) { - shellPrintString(getThreadLocalGeosCtx()->errMsg, width); //should NOT happen + shellPrintString(getThreadLocalGeosCtx()->errMsg, width); // should NOT happen return; } @@ -612,27 +608,26 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t break; case TSDB_DATA_TYPE_FLOAT: if (tsEnableScience) { - printf("%*.7e",width,GET_FLOAT_VAL(val)); + printf("%*.7e", width, GET_FLOAT_VAL(val)); } else { n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.7f", width, GET_FLOAT_VAL(val)); if (n > SHELL_FLOAT_WIDTH) { - - printf("%*.7e", width,GET_FLOAT_VAL(val)); + printf("%*.7e", width, GET_FLOAT_VAL(val)); } else { - 
printf("%s", buf); + printf("%s", buf); } } break; case TSDB_DATA_TYPE_DOUBLE: if (tsEnableScience) { - snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15e", width,GET_DOUBLE_VAL(val)); + snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15e", width, GET_DOUBLE_VAL(val)); printf("%s", buf); } else { n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15f", width, GET_DOUBLE_VAL(val)); if (n > SHELL_DOUBLE_WIDTH) { - printf("%*.15e", width, GET_DOUBLE_VAL(val)); + printf("%*.15e", width, GET_DOUBLE_VAL(val)); } else { - printf("%*s", width,buf); + printf("%*s", width, buf); } } break; @@ -905,7 +900,7 @@ void shellReadHistory() { TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_READ | TD_FILE_STREAM); if (pFile == NULL) return; - char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); + char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); int32_t read_size = 0; while ((read_size = taosGetsFile(pFile, TSDB_MAX_ALLOWED_SQL_LEN, line)) != -1) { line[read_size - 1] = '\0'; @@ -922,8 +917,8 @@ void shellReadHistory() { taosMemoryFreeClear(line); taosCloseFile(&pFile); int64_t file_size; - if (taosStatFile(pHistory->file, &file_size, NULL) == 0 && file_size > SHELL_MAX_COMMAND_SIZE) { - TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_TRUNC); + if (taosStatFile(pHistory->file, &file_size, NULL, NULL) == 0 && file_size > SHELL_MAX_COMMAND_SIZE) { + TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_TRUNC); if (pFile == NULL) return; int32_t endIndex = pHistory->hstart; if (endIndex != 0) { @@ -945,7 +940,7 @@ void shellReadHistory() { void shellWriteHistory() { SShellHistory *pHistory = &shell.history; if (pHistory->hend == pHistory->hstart) return; - TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_APPEND); + TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | 
TD_FILE_APPEND); if (pFile == NULL) return; for (int32_t i = pHistory->hstart; i != pHistory->hend;) { @@ -991,7 +986,7 @@ void shellSourceFile(const char *file) { tstrncpy(fullname, file, PATH_MAX); } - sprintf(sourceFileCommand, "source %s;",fullname); + sprintf(sourceFileCommand, "source %s;", fullname); shellRecordCommandToHistory(sourceFileCommand); TdFilePtr pFile = taosOpenFile(fullname, TD_FILE_READ | TD_FILE_STREAM); @@ -1001,7 +996,7 @@ void shellSourceFile(const char *file) { return; } - char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); + char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); while ((read_len = taosGetsFile(pFile, TSDB_MAX_ALLOWED_SQL_LEN, line)) != -1) { if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue; line[--read_len] = '\0'; @@ -1044,7 +1039,8 @@ void shellGetGrantInfo() { int32_t code = taos_errno(tres); if (code != TSDB_CODE_SUCCESS) { - if (code != TSDB_CODE_OPS_NOT_SUPPORT && code != TSDB_CODE_MND_NO_RIGHTS && code != TSDB_CODE_PAR_PERMISSION_DENIED) { + if (code != TSDB_CODE_OPS_NOT_SUPPORT && code != TSDB_CODE_MND_NO_RIGHTS && + code != TSDB_CODE_PAR_PERMISSION_DENIED) { fprintf(stderr, "Failed to check Server Edition, Reason:0x%04x:%s\r\n\r\n", code, taos_errstr(tres)); } return; @@ -1080,7 +1076,8 @@ void shellGetGrantInfo() { } else if (strcmp(expiretime, "unlimited") == 0) { fprintf(stdout, "Server is Enterprise %s Edition, %s and will never expire.\r\n", serverVersion, sinfo); } else { - fprintf(stdout, "Server is Enterprise %s Edition, %s and will expire at %s.\r\n", serverVersion, sinfo, expiretime); + fprintf(stdout, "Server is Enterprise %s Edition, %s and will expire at %s.\r\n", serverVersion, sinfo, + expiretime); } taos_free_result(tres); @@ -1123,9 +1120,9 @@ void *shellCancelHandler(void *arg) { #ifdef WEBSOCKET } #endif - #ifdef WINDOWS +#ifdef WINDOWS printf("\n%s", shell.info.promptHeader); - #endif +#endif } return NULL; @@ -1165,8 +1162,7 @@ void *shellThreadLoop(void *arg) { } int32_t 
shellExecute() { - printf(shell.info.clientVersion, shell.info.cusName, - taos_get_client_info(), shell.info.cusName); + printf(shell.info.clientVersion, shell.info.cusName, taos_get_client_info(), shell.info.cusName); fflush(stdout); SShellArgs *pArgs = &shell.args; @@ -1233,13 +1229,13 @@ int32_t shellExecute() { taosSetSignal(SIGTERM, shellQueryInterruptHandler); taosSetSignal(SIGHUP, shellQueryInterruptHandler); taosSetSignal(SIGINT, shellQueryInterruptHandler); - + #ifdef WEBSOCKET if (!shell.args.restful && !shell.args.cloud) { #endif #ifndef WINDOWS printfIntroduction(); -#endif +#endif shellGetGrantInfo(); #ifdef WEBSOCKET } diff --git a/utils/test/c/tmqDemo.c b/utils/test/c/tmqDemo.c index ce069c2b05..64f536433e 100644 --- a/utils/test/c/tmqDemo.c +++ b/utils/test/c/tmqDemo.c @@ -221,7 +221,7 @@ int64_t getDirectorySize(char* dir) { totalSize += subDirSize; } else if (0 == strcmp(strchr(fileName, '.'), ".log")) { // only calc .log file size, and not include .idx file int64_t file_size = 0; - taosStatFile(subdir, &file_size, NULL); + taosStatFile(subdir, &file_size, NULL, NULL); totalSize += file_size; } } @@ -702,4 +702,3 @@ int main(int32_t argc, char* argv[]) { taosCloseFile(&g_fp); return 0; } - From 9e4da6c089b5db3f08716961983053476c207206 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 7 Aug 2023 17:14:58 +0800 Subject: [PATCH 06/78] s3/config: parsing s3 configuration --- source/common/src/tglobal.c | 41 +++++++++++++++++++++++---- source/dnode/vnode/src/vnd/vnodeCos.c | 8 +++--- 2 files changed, 39 insertions(+), 10 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 1f6d0800a5..fbc98715f0 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -236,8 +236,9 @@ bool tsFilterScalarMode = false; int32_t tsKeepTimeOffset = 0; // latency of data migration char tsS3Endpoint[TSDB_FQDN_LEN] = ""; -char tsS3AcessKeyId[TSDB_FQDN_LEN] = ""; -char tsS3AcessKeySecret[TSDB_FQDN_LEN] = ""; +char 
tsS3AccessKey[TSDB_FQDN_LEN] = ""; +char tsS3AccessKeyId[TSDB_FQDN_LEN] = ""; +char tsS3AccessKeySecret[TSDB_FQDN_LEN] = ""; char tsS3BucketName[TSDB_FQDN_LEN] = ""; char tsS3AppId[TSDB_FQDN_LEN] = ""; int8_t tsS3Enabled = false; @@ -263,6 +264,35 @@ int32_t taosSetTfsCfg(SConfig *pCfg) { int32_t taosSetTfsCfg(SConfig *pCfg); #endif +int32_t taosSetS3Cfg(SConfig *pCfg) { + tstrncpy(tsS3AccessKey, cfgGetItem(pCfg, "s3Accesskey")->str, TSDB_FQDN_LEN); + char *colon = strchr(tsS3AccessKey, ':'); + if (!colon) { + uError("invalid access key:%s", tsS3AccessKey); + return -1; + } + *colon = '\0'; + tstrncpy(tsS3AccessKeyId, tsS3AccessKey, TSDB_FQDN_LEN); + tstrncpy(tsS3AccessKeySecret, colon + 1, TSDB_FQDN_LEN); + tstrncpy(tsS3Endpoint, cfgGetItem(pCfg, "s3Endpoint")->str, TSDB_FQDN_LEN); + tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); + char *cos = strstr(tsS3Endpoint, "cos."); + if (cos) { + char *appid = strrchr(tsS3BucketName, '-'); + if (!appid) { + uError("failed to locate appid in bucket:%s", tsS3BucketName); + return -1; + } else { + tstrncpy(tsS3AppId, appid + 1, TSDB_FQDN_LEN); + } + } + if (tsS3BucketName[0] != '<' && tsDiskCfgNum > 1) { + tsS3Enabled = true; + } + + return 0; +} + struct SConfig *taosGetCfg() { return tsCfg; } @@ -582,6 +612,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddString(pCfg, "s3Accesskey", tsS3AccessKey, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddString(pCfg, "s3Endpoint", tsS3Endpoint, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER) != 0) return -1; GRANT_CFG_ADD; @@ -972,8 +1004,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsMaxStreamBackendCache = cfgGetItem(pCfg, 
"maxStreamBackendCache")->i32; tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32; - tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); - GRANT_CFG_GET; return 0; } @@ -1298,8 +1328,6 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp); snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port); cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype); - } else if (strcasecmp("s3BucketName", name) == 0) { - tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); } else if (strcasecmp("sDebugFlag", name) == 0) { sDebugFlag = cfgGetItem(pCfg, "sDebugFlag")->i32; } else if (strcasecmp("smaDebugFlag", name) == 0) { @@ -1498,6 +1526,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile if (taosSetServerCfg(tsCfg)) return -1; if (taosSetReleaseCfg(tsCfg)) return -1; if (taosSetTfsCfg(tsCfg) != 0) return -1; + if (taosSetS3Cfg(tsCfg) != 0) return -1; } taosSetSystemCfg(tsCfg); diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index bac38f7c35..a40e046972 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -7,8 +7,8 @@ #include "cos_log.h" extern char tsS3Endpoint[]; -extern char tsS3AcessKeyId[]; -extern char tsS3AcessKeySecret[]; +extern char tsS3AccessKeyId[]; +extern char tsS3AccessKeySecret[]; extern char tsS3BucketName[]; extern char tsS3AppId[]; @@ -41,8 +41,8 @@ static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) { cos_config_t *config = options->config; cos_str_set(&config->endpoint, tsS3Endpoint); - cos_str_set(&config->access_key_id, tsS3AcessKeyId); - cos_str_set(&config->access_key_secret, tsS3AcessKeySecret); + cos_str_set(&config->access_key_id, tsS3AccessKeyId); + cos_str_set(&config->access_key_secret, tsS3AccessKeySecret); 
cos_str_set(&config->appid, tsS3AppId); config->is_cname = is_cname; From 1290f529daf34cb3cfd6fa3cff92b43e94694a10 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 7 Aug 2023 17:17:43 +0800 Subject: [PATCH 07/78] cos/example: turn head object on --- contrib/test/cos/main.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/contrib/test/cos/main.c b/contrib/test/cos/main.c index faaceee2e3..7e5e7c8c8b 100644 --- a/contrib/test/cos/main.c +++ b/contrib/test/cos/main.c @@ -530,7 +530,12 @@ void test_head_object() { s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); print_headers(resp_headers); if (cos_status_is_ok(s)) { - printf("head object succeeded\n"); + long size = 0; + char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH); + if (content_length_str != NULL) { + size = atol(content_length_str); + } + printf("head object succeeded: %ld\n", size); } else { printf("head object failed\n"); } @@ -3045,7 +3050,7 @@ int main(int argc, char *argv[]) { // test_object(); // test_put_object_with_limit(); // test_get_object_with_limit(); - // test_head_object(); + test_head_object(); // test_gen_object_url(); // test_list_objects(); // test_list_directory(); From 864d07e1bcf1168c4eb728fdb9ef3bb2f25f5ff2 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 7 Aug 2023 19:39:06 +0800 Subject: [PATCH 08/78] feat:[TD-24559]support geomety type in schemaless --- source/client/CMakeLists.txt | 4 +-- source/client/inc/clientSml.h | 1 + source/client/src/clientSml.c | 1 + source/client/src/clientSmlLine.c | 39 ++++++++++++++++++---- source/client/test/CMakeLists.txt | 2 +- source/dnode/mnode/impl/src/mndSubscribe.c | 2 +- source/libs/parser/src/parInsertSml.c | 14 ++++++-- source/libs/parser/src/parInsertUtil.c | 2 +- 8 files changed, 50 insertions(+), 15 deletions(-) diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt index d0cfd38fb9..e45ee9c932 100644 --- a/source/client/CMakeLists.txt 
+++ b/source/client/CMakeLists.txt @@ -16,7 +16,7 @@ target_include_directories( target_link_libraries( taos INTERFACE api - PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom + PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom geometry ) if(TD_DARWIN_ARM64) @@ -57,7 +57,7 @@ target_include_directories( target_link_libraries( taos_static INTERFACE api - PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom + PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom geometry ) if(${BUILD_TEST}) diff --git a/source/client/inc/clientSml.h b/source/client/inc/clientSml.h index 040064560c..11376052b6 100644 --- a/source/client/inc/clientSml.h +++ b/source/client/inc/clientSml.h @@ -33,6 +33,7 @@ extern "C" { #include "ttime.h" #include "ttypes.h" #include "cJSON.h" +#include "geosWrapper.h" #if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__) # define expect(expr,value) (__builtin_expect ((expr),(value)) ) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index ffff3df5d0..65175110cf 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -1191,6 +1191,7 @@ void freeSSmlKv(void *data) { SSmlKv *kv = (SSmlKv *)data; if (kv->keyEscaped) taosMemoryFree((void *)(kv->key)); if (kv->valueEscaped) taosMemoryFree((void *)(kv->value)); + if (kv->type == TSDB_DATA_TYPE_GEOMETRY) geosFreeBuffer((void *)(kv->value)); } void smlDestroyInfo(SSmlHandle *info) { diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index 1ee2cfbedf..9c0b2d7688 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -102,6 +102,30 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { return TSDB_CODE_TSC_INVALID_VALUE; } + if (pVal->value[0] == 'g' || 
pVal->value[0] == 'G') { // geometry + if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= sizeof("POINT")+3) { + int32_t code = initCtxGeomFromText(); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + char* tmp = taosMemoryCalloc(pVal->length, 1); + memcmp(tmp, pVal->value + 2, pVal->length - 3); + code = doGeomFromText(tmp, (unsigned char **)&pVal->value, &pVal->length); + taosMemoryFree(tmp); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + pVal->type = TSDB_DATA_TYPE_GEOMETRY; + if (pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { + geosFreeBuffer((void*)(pVal->value)); + return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; + } + return TSDB_CODE_SUCCESS; + } + return TSDB_CODE_TSC_INVALID_VALUE; + } + if (pVal->value[0] == 't' || pVal->value[0] == 'T') { if (pVal->length == 1 || (pVal->length == 4 && (pVal->value[1] == 'r' || pVal->value[1] == 'R') && @@ -390,7 +414,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin SSmlKv kv = {.key = tag->name, .keyLen = strlen(tag->name), .type = tag->type}; if (tag->type == TSDB_DATA_TYPE_NCHAR) { kv.length = (tag->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; - } else if (tag->type == TSDB_DATA_TYPE_BINARY) { + } else if (tag->type == TSDB_DATA_TYPE_BINARY || tag->type == TSDB_DATA_TYPE_GEOMETRY) { kv.length = tag->bytes - VARSTR_HEADER_SIZE; } taosArrayPush((*tmp)->cols, &kv); @@ -663,14 +687,15 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine if (info->dataFormat) { uDebug("SML:0x%" PRIx64 " smlParseInfluxString format true, ts:%" PRId64, info->id, ts); ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 0); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - ret = smlBuildRow(info->currTableDataCtx); - if (ret != TSDB_CODE_SUCCESS) { - return ret; + if (ret == TSDB_CODE_SUCCESS) { + ret = smlBuildRow(info->currTableDataCtx); } + 
clearColValArray(info->currTableDataCtx->pValues); + if (unlikely(ret != TSDB_CODE_SUCCESS)) { + smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL); + return ret; + } } else { uDebug("SML:0x%" PRIx64 " smlParseInfluxString format false, ts:%" PRId64, info->id, ts); taosArraySet(elements->colArray, 0, &kv); diff --git a/source/client/test/CMakeLists.txt b/source/client/test/CMakeLists.txt index 34c377c6ea..91f0d1eef8 100644 --- a/source/client/test/CMakeLists.txt +++ b/source/client/test/CMakeLists.txt @@ -20,7 +20,7 @@ TARGET_LINK_LIBRARIES( ADD_EXECUTABLE(smlTest smlTest.cpp) TARGET_LINK_LIBRARIES( smlTest - PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom + PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom geometry ) TARGET_INCLUDE_DIRECTORIES( diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 6f50b9ff9f..166e67b15b 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -489,7 +489,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR SMqVgEp *pVgEp = taosArrayGetP(pConsumerEpNew->vgs, i); if(pVgEp->vgId == d1->vgId){ jump = true; - mInfo("pSub->offsetRows jump, because consumer id:%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId); + mInfo("pSub->offsetRows jump, because consumer id:0x%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId); break; } } diff --git a/source/libs/parser/src/parInsertSml.c b/source/libs/parser/src/parInsertSml.c index 78b05b6df5..577b8961a7 100644 --- a/source/libs/parser/src/parInsertSml.c +++ b/source/libs/parser/src/parInsertSml.c @@ -22,7 +22,7 @@ static void clearColValArray(SArray* pCols) { int32_t num = taosArrayGetSize(pCols); for (int32_t i = 0; i < num; ++i) { SColVal* pCol = taosArrayGet(pCols, i); - if (TSDB_DATA_TYPE_NCHAR == pCol->type) { + if 
(TSDB_DATA_TYPE_NCHAR == pCol->type || TSDB_DATA_TYPE_GEOMETRY == pCol->type) { taosMemoryFreeClear(pCol->value.pData); } pCol->flag = CV_FLAG_NONE; @@ -237,9 +237,13 @@ int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* data, int32 } pVal->value.pData = pUcs4; pVal->value.nData = len; - } else if (kv->type == TSDB_DATA_TYPE_BINARY || kv->type == TSDB_DATA_TYPE_GEOMETRY) { + } else if (kv->type == TSDB_DATA_TYPE_BINARY) { pVal->value.nData = kv->length; pVal->value.pData = (uint8_t*)kv->value; + } else if (kv->type == TSDB_DATA_TYPE_GEOMETRY) { + pVal->value.nData = kv->length; + pVal->value.pData = taosMemoryMalloc(kv->length); + memcpy(pVal->value.pData, (uint8_t*)kv->value, kv->length); } else { memcpy(&pVal->value.val, &(kv->value), kv->length); } @@ -364,9 +368,13 @@ int32_t smlBindData(SQuery* query, bool dataFormat, SArray* tags, SArray* colsSc } pVal->value.pData = pUcs4; pVal->value.nData = len; - } else if (kv->type == TSDB_DATA_TYPE_BINARY || kv->type == TSDB_DATA_TYPE_GEOMETRY) { + } else if (kv->type == TSDB_DATA_TYPE_BINARY) { pVal->value.nData = kv->length; pVal->value.pData = (uint8_t*)kv->value; + } else if (kv->type == TSDB_DATA_TYPE_GEOMETRY) { + pVal->value.nData = kv->length; + pVal->value.pData = taosMemoryMalloc(kv->length); + memcpy(pVal->value.pData, (uint8_t*)kv->value, kv->length); } else { memcpy(&pVal->value.val, &(kv->value), kv->length); } diff --git a/source/libs/parser/src/parInsertUtil.c b/source/libs/parser/src/parInsertUtil.c index de7d154db6..33699ed857 100644 --- a/source/libs/parser/src/parInsertUtil.c +++ b/source/libs/parser/src/parInsertUtil.c @@ -333,7 +333,7 @@ int32_t insGetTableDataCxt(SHashObj* pHash, void* id, int32_t idLen, STableMeta* static void destroyColVal(void* p) { SColVal* pVal = p; - if (TSDB_DATA_TYPE_NCHAR == pVal->type) { + if (TSDB_DATA_TYPE_NCHAR == pVal->type || TSDB_DATA_TYPE_GEOMETRY == pVal->type) { taosMemoryFree(pVal->value.pData); } } From 
d0335bec974ee1516d74a6bafd196b0fea2859bd Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 8 Aug 2023 09:49:31 +0800 Subject: [PATCH 09/78] feat:[TD-24559]support geomety type in schemaless --- source/client/src/clientSmlLine.c | 2 +- utils/test/c/sml_test.c | 33 +++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index 9c0b2d7688..d6f405e69d 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -109,7 +109,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { return code; } char* tmp = taosMemoryCalloc(pVal->length, 1); - memcmp(tmp, pVal->value + 2, pVal->length - 3); + memcpy(tmp, pVal->value + 2, pVal->length - 3); code = doGeomFromText(tmp, (unsigned char **)&pVal->value, &pVal->length); taosMemoryFree(tmp); if (code != TSDB_CODE_SUCCESS) { diff --git a/utils/test/c/sml_test.c b/utils/test/c/sml_test.c index e4ed6037a3..237bfc5092 100644 --- a/utils/test/c/sml_test.c +++ b/utils/test/c/sml_test.c @@ -1552,12 +1552,45 @@ int sml_ts3724_Test() { return code; } +int sml_td24559_Test() { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + + TAOS_RES *pRes = taos_query(taos, "drop database if exists td24559"); + taos_free_result(pRes); + + pRes = taos_query(taos, "create database if not exists td24559"); + taos_free_result(pRes); + + const char *sql[] = { + "stb,t1=1 f1=283i32,f2=g\"Point(4.343 89.342)\" 1632299372000", + "stb,t1=1 f2=G\"Point(4.343 89.342)\",f1=106i32 1632299373000", + "stb,t2=1 f2=G\"Point(4.343 89.342)\",f1=106i32 1632299374000", + "stb,t1=1 f1=106i32,f2=G\"GEOMETRYCOLLECTION (MULTIPOINT((0 0), (1 1)), POINT(3 4), LINESTRING(2 3, 3 4))\" 1632299378000", + }; + + pRes = taos_query(taos, "use td24559"); + taos_free_result(pRes); + + pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_MILLI_SECONDS); + 
+ int code = taos_errno(pRes); + printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes)); + taos_free_result(pRes); + + taos_close(taos); + + return code; +} + int main(int argc, char *argv[]) { if (argc == 2) { taos_options(TSDB_OPTION_CONFIGDIR, argv[1]); } int ret = 0; + ret = sml_td24559_Test(); + ASSERT(!ret); ret = sml_td24070_Test(); ASSERT(!ret); ret = sml_td23881_Test(); From 607132c18a4e00607a4fe64c2365a4e68c222d0e Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 8 Aug 2023 10:33:55 +0800 Subject: [PATCH 10/78] feat:[TD-24559]support geomety type in schemaless --- source/client/inc/clientSml.h | 2 +- source/client/src/clientSmlLine.c | 6 +++--- tests/system-test/2-query/sml.py | 5 +++++ 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/source/client/inc/clientSml.h b/source/client/inc/clientSml.h index 11376052b6..1839c14894 100644 --- a/source/client/inc/clientSml.h +++ b/source/client/inc/clientSml.h @@ -193,7 +193,7 @@ typedef struct { // SArray *preLineTagKV; SArray *maxTagKVs; - SArray *masColKVs; + SArray *maxColKVs; SSmlLineInfo preLine; STableMeta *currSTableMeta; diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index d6f405e69d..558c5f4ddb 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -421,7 +421,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin } } info->currSTableMeta = (*tmp)->tableMeta; - info->masColKVs = (*tmp)->cols; + info->maxColKVs = (*tmp)->cols; } } @@ -536,13 +536,13 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin freeSSmlKv(&kv); return TSDB_CODE_SUCCESS; } - if (cnt >= taosArrayGetSize(info->masColKVs)) { + if (cnt >= taosArrayGetSize(info->maxColKVs)) { info->dataFormat = false; info->reRun = true; freeSSmlKv(&kv); return TSDB_CODE_SUCCESS; } - SSmlKv *maxKV = (SSmlKv *)taosArrayGet(info->masColKVs, cnt); + SSmlKv *maxKV = (SSmlKv *)taosArrayGet(info->maxColKVs, 
cnt); if (kv.type != maxKV->type) { info->dataFormat = false; info->reRun = true; diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py index b3aeb72194..cae012ece1 100644 --- a/tests/system-test/2-query/sml.py +++ b/tests/system-test/2-query/sml.py @@ -110,6 +110,11 @@ class TDTestCase: tdSql.query(f"select * from ts3724.`stb2.`") tdSql.checkRows(1) + + # tdSql.query(f"select * from td24559.stb order by _ts") + # tdSql.checkRows(4) + # tdSql.checkData(0, 2, "POINT (4.343000 89.342000)") + # tdSql.checkData(3, 2, "GEOMETRYCOLLECTION (MULTIPOINT ((0.000000 0.000000), (1.000000 1.000000)), POINT (3.000000 4.000000), LINESTRING (2.000000 3.000000, 3.000000 4.000000))") return def run(self): From d322cfce50b4be2c76219aa1e5d6c807b1694312 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 8 Aug 2023 16:28:51 +0800 Subject: [PATCH 11/78] fix:add hashIteratorCancel for hash iterator --- source/client/src/clientSml.c | 5 +++++ source/dnode/vnode/src/sma/smaRollup.c | 1 + source/dnode/vnode/src/tq/tqRead.c | 1 + 3 files changed, 7 insertions(+) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 65175110cf..cad32842a0 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -1073,6 +1073,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { return 0; end: + taosHashCancelIterate(info->superTables, tmp); taosHashCleanup(hashTmp); taosMemoryFreeClear(pTableMeta); catalogRefreshTableMeta(info->pCatalog, &conn, &pName, 1); @@ -1434,6 +1435,7 @@ static int32_t smlInsertData(SSmlHandle *info) { code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE); if(code != TSDB_CODE_SUCCESS){ taosMemoryFree(measure); + taosHashCancelIterate(info->childTables, oneTable); return code; } @@ -1442,6 +1444,7 @@ static int32_t smlInsertData(SSmlHandle *info) { if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " catalogGetTableHashVgroup failed. 
table name: %s", info->id, tableData->childTableName); taosMemoryFree(measure); + taosHashCancelIterate(info->childTables, oneTable); return code; } taosHashPut(info->pVgHash, (const char *)&vg.vgId, sizeof(vg.vgId), (char *)&vg, sizeof(vg)); @@ -1451,6 +1454,7 @@ static int32_t smlInsertData(SSmlHandle *info) { if (unlikely(NULL == pMeta || NULL == (*pMeta)->tableMeta)) { uError("SML:0x%" PRIx64 " NULL == pMeta. table name: %s", info->id, tableData->childTableName); taosMemoryFree(measure); + taosHashCancelIterate(info->childTables, oneTable); return TSDB_CODE_SML_INTERNAL_ERROR; } @@ -1466,6 +1470,7 @@ static int32_t smlInsertData(SSmlHandle *info) { taosMemoryFree(measure); if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " smlBindData failed", info->id); + taosHashCancelIterate(info->childTables, oneTable); return code; } oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, oneTable); diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 9fd4938448..2813f9059c 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -905,6 +905,7 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, int64_t version, void *pReq, void *pMsg, tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); if (tdExecuteRSmaAsync(pSma, version, pMsg, len, inputType, *pTbSuid) < 0) { smaError("vgId:%d, failed to process rsma submit exec 2 since: %s", SMA_VID(pSma), terrstr()); + taosHashCancelIterate(uidStore.uidHash, pIter); goto _err; } } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 9b8f1781cb..046502b49f 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -1095,6 +1095,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { if(ret != TDB_CODE_SUCCESS) { tqError("qGetTableList in tqUpdateTbUidList error:%d handle %s consumer:0x%" PRIx64, ret, pTqHandle->subKey, 
pTqHandle->consumerId); taosArrayDestroy(list); + taosHashCancelIterate(pTq->pHandle, pIter); return ret; } tqReaderSetTbUidList(pTqHandle->execHandle.pTqReader, list, NULL); From 16e015253b2c3f01981914f34743794e1d203e94 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 10:10:25 +0800 Subject: [PATCH 12/78] s3/mxml: remove os external dependency --- contrib/CMakeLists.txt | 31 ++++++++++++++++++++++++++++--- contrib/test/cos/CMakeLists.txt | 4 ++-- source/dnode/vnode/CMakeLists.txt | 2 +- 3 files changed, 31 insertions(+), 6 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index df9519d00f..db4d359938 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -124,6 +124,9 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) + file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) + cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif(${BUILD_WITH_COS}) @@ -157,6 +160,21 @@ if(${BUILD_GEOS}) cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif() +# SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=unused-function") +# include(ExternalProject) +# ExternalProject_Add(mxml +# GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git +# GIT_TAG release-2.10 +# SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" +# #BINARY_DIR "" +# BUILD_IN_SOURCE TRUE +# CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build +# BUILD_COMMAND make +# INSTALL_COMMAND make install +# TEST_COMMAND "" +# ) + + # download dependencies configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . 
@@ -355,7 +373,10 @@ endif() # cos if(${BUILD_WITH_COS}) + if(NOT ${TD_WINDOWS}) + #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) + INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) set(CMAKE_BUILD_TYPE debug) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) @@ -363,11 +384,15 @@ if(${BUILD_WITH_COS}) add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL) target_include_directories( - cos_c_sdk - PUBLIC $ - ) + cos_c_sdk + PUBLIC $ + ) set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME}) + + else() + + endif(NOT ${TD_WINDOWS}) endif(${BUILD_WITH_COS}) # lucene diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 77c57e5a65..3eb484c2c5 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -39,11 +39,11 @@ target_include_directories( find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) -find_library(MINIXML_LIBRARY mxml) +#find_library(MINIXML_LIBRARY mxml) find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) target_link_libraries(cosTest ${APR_UTIL_LIBRARY}) target_link_libraries(cosTest ${APR_LIBRARY}) -target_link_libraries(cosTest ${MINIXML_LIBRARY}) +target_link_libraries(cosTest mxml) target_link_libraries(cosTest ${CURL_LIBRARY}) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 0612f924f5..eea81ea3d2 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -137,7 +137,7 @@ endif() find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) -find_library(MINIXML_LIBRARY mxml) +#find_library(MINIXML_LIBRARY mxml) find_library(CURL_LIBRARY curl) target_link_libraries( From c4f7b5d530a58026b1b1b7b64b535e1e12887ecc Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 10:23:00 +0800 Subject: [PATCH 
13/78] mxml: makefile for mxml --- cmake/mxml_CMakeLists.txt.in | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 cmake/mxml_CMakeLists.txt.in diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in new file mode 100644 index 0000000000..994aa6e2cb --- /dev/null +++ b/cmake/mxml_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# cos +ExternalProject_Add(mxml + GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git + GIT_TAG release-2.10 + SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" + #BINARY_DIR "" + BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + BUILD_COMMAND make + INSTALL_COMMAND make install + TEST_COMMAND "" +) From 4d1155a5cfe6a329a4bb728e1c4aa3c3e3600a77 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 10:43:27 +0800 Subject: [PATCH 14/78] curl: makefile for curl --- cmake/curl_CMakeLists.txt.in | 12 ++++++++++++ contrib/test/cos/CMakeLists.txt | 4 ++-- source/dnode/vnode/CMakeLists.txt | 6 +++--- 3 files changed, 17 insertions(+), 5 deletions(-) create mode 100644 cmake/curl_CMakeLists.txt.in diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in new file mode 100644 index 0000000000..a23c5e7bab --- /dev/null +++ b/cmake/curl_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# curl +ExternalProject_Add(curl + GIT_REPOSITORY https://github.com/curl/curl.git + GIT_TAG curl-7_88_1 + SOURCE_DIR "${TD_CONTRIB_DIR}/curl" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 3eb484c2c5..38de8a25e8 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -40,10 +40,10 @@ target_include_directories( find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) -find_library(CURL_LIBRARY curl) 
+#find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) target_link_libraries(cosTest ${APR_UTIL_LIBRARY}) target_link_libraries(cosTest ${APR_LIBRARY}) target_link_libraries(cosTest mxml) -target_link_libraries(cosTest ${CURL_LIBRARY}) +target_link_libraries(cosTest curl) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index eea81ea3d2..562207268c 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -138,7 +138,7 @@ endif() find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) -find_library(CURL_LIBRARY curl) +#find_library(CURL_LIBRARY curl) target_link_libraries( vnode @@ -164,8 +164,8 @@ target_link_libraries( cos_c_sdk ${APR_UTIL_LIBRARY} ${APR_LIBRARY} - ${MINIXML_LIBRARY} - ${CURL_LIBRARY} + mxml + curl ) IF (TD_GRANT) From 7114ad63e5ac8846a11e221079031ee22bccc4f1 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 10:54:17 +0800 Subject: [PATCH 15/78] =?UTF-8?q?apr=EF=BC=9Amakefile=20for=20apr=20&=20ap?= =?UTF-8?q?r-util?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- cmake/apr-util_CMakeLists.txt.in | 12 ++++++++++++ cmake/apr_CMakeLists.txt.in | 12 ++++++++++++ contrib/test/cos/CMakeLists.txt | 8 ++++---- source/dnode/vnode/CMakeLists.txt | 8 ++++---- 4 files changed, 32 insertions(+), 8 deletions(-) create mode 100644 cmake/apr-util_CMakeLists.txt.in create mode 100644 cmake/apr_CMakeLists.txt.in diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in new file mode 100644 index 0000000000..8471a05db6 --- /dev/null +++ b/cmake/apr-util_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# apr-util +ExternalProject_Add(apr + GIT_REPOSITORY https://github.com/apache/apr-util.git + GIT_TAG 1.5.4 + SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND 
"" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in new file mode 100644 index 0000000000..68b6f39c89 --- /dev/null +++ b/cmake/apr_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# apr +ExternalProject_Add(apr + GIT_REPOSITORY https://github.com/apache/apr.git + GIT_TAG 1.5.2 + SOURCE_DIR "${TD_CONTRIB_DIR}/apr" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 38de8a25e8..2d2e101877 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -37,13 +37,13 @@ target_include_directories( PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" ) -find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) #find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) -target_link_libraries(cosTest ${APR_UTIL_LIBRARY}) -target_link_libraries(cosTest ${APR_LIBRARY}) +target_link_libraries(cosTest apr}) +target_link_libraries(cosTest apr-util}) target_link_libraries(cosTest mxml) target_link_libraries(cosTest curl) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 562207268c..cf7d205c00 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,8 +135,8 @@ else() endif() endif() -find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) #find_library(CURL_LIBRARY curl) @@ 
-162,8 +162,8 @@ target_link_libraries( # s3 cos_c_sdk - ${APR_UTIL_LIBRARY} - ${APR_LIBRARY} + apr + apr-util mxml curl ) IF (TD_GRANT) From 2e0519b9609d98a119b8908199c073dd8cb07d9f Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 11:01:42 +0800 Subject: [PATCH 16/78] apr: fix apr & apr-util project names --- cmake/apr-util_CMakeLists.txt.in | 2 +- cmake/apr_CMakeLists.txt.in | 2 +- contrib/test/cos/CMakeLists.txt | 4 ++-- source/dnode/vnode/CMakeLists.txt | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 8471a05db6..c4dd943243 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -1,5 +1,5 @@ # apr-util -ExternalProject_Add(apr +ExternalProject_Add(aprutil-1 GIT_REPOSITORY https://github.com/apache/apr-util.git GIT_TAG 1.5.4 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 68b6f39c89..bfbe8196d3 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -1,5 +1,5 @@ # apr -ExternalProject_Add(apr +ExternalProject_Add(apr-1 GIT_REPOSITORY https://github.com/apache/apr.git GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 2d2e101877..f8804033de 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -43,7 +43,7 @@ target_include_directories( #find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) -target_link_libraries(cosTest apr}) -target_link_libraries(cosTest apr-util}) +target_link_libraries(cosTest apr-1) +target_link_libraries(cosTest aprutil-1) target_link_libraries(cosTest mxml) target_link_libraries(cosTest curl) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index cf7d205c00..a219990690 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt
@@ -162,8 +162,8 @@ target_link_libraries( # s3 cos_c_sdk - apr - apr-util + apr-1 + aprutil-1 mxml curl ) From 57ba106371bbbcbf83b14ce8668b798aae861bbe Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 11:04:53 +0800 Subject: [PATCH 17/78] cos: move cmake prefix path after external building --- contrib/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index db4d359938..0ef799e9da 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -125,7 +125,6 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) - set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) @@ -374,6 +373,7 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) + set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) From 398567ef4ca524cd74ee3f203ba6512cde7f523d Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 11:14:00 +0800 Subject: [PATCH 18/78] contrib/cmake: add apr apr-util, and curl into makefile --- contrib/CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 0ef799e9da..053266c533 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -125,6 +125,9 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" 
${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) From 7e2859ed43e8ebf375a5808b0d35c3c191a725b6 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 13:03:37 +0800 Subject: [PATCH 19/78] apr: use tarball to avoid ./buildconf --- cmake/apr-util_CMakeLists.txt.in | 18 +++++++++++------- cmake/apr_CMakeLists.txt.in | 21 ++++++++++++++------- cmake/curl_CMakeLists.txt.in | 9 ++++----- contrib/CMakeLists.txt | 2 +- source/dnode/vnode/CMakeLists.txt | 16 ++++++++-------- 5 files changed, 38 insertions(+), 28 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index c4dd943243..b81745aeef 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -1,12 +1,16 @@ # apr-util ExternalProject_Add(aprutil-1 - GIT_REPOSITORY https://github.com/apache/apr-util.git - GIT_TAG 1.5.4 + URL https://dlcdn.apache.org//apr/apr-util-1.6.3.tar.gz + URL_HASH SHA256=2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983 + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/apache/apr-util.git + #GIT_TAG 1.5.4 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" - BINARY_DIR "" - #BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" + #BINARY_DIR "" + BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build + BUILD_COMMAND make + INSTALL_COMMAND make install TEST_COMMAND "" ) diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index bfbe8196d3..037c2ee6cc 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -1,12 +1,19 @@ # apr ExternalProject_Add(apr-1 - GIT_REPOSITORY https://github.com/apache/apr.git - GIT_TAG 1.5.2 + URL https://dlcdn.apache.org//apr/apr-1.7.4.tar.gz + URL_HASH SHA256=a4137dd82a185076fa50ba54232d920a17c6469c30b0876569e1c2a05ff311d9 
+ DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/apache/apr.git + #GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" - BINARY_DIR "" - #BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" + #BINARY_DIR "${CMAKE_BINARY_DIR}/build" + BUILD_IN_SOURCE TRUE + #CONFIGURE_COMMAND "" + #BUILD_COMMAND "" + #INSTALL_COMMAND "" + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + BUILD_COMMAND make + INSTALL_COMMAND make install TEST_COMMAND "" ) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index a23c5e7bab..cec4dda004 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -3,10 +3,9 @@ ExternalProject_Add(curl GIT_REPOSITORY https://github.com/curl/curl.git GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" - BINARY_DIR "" - #BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" + BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + BUILD_COMMAND make + INSTALL_COMMAND make install TEST_COMMAND "" ) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 053266c533..058b2cf042 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -125,10 +125,10 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif(${BUILD_WITH_COS}) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index a219990690..0612f924f5 
100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,10 +135,10 @@ else() endif() endif() -#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) -#find_library(MINIXML_LIBRARY mxml) -#find_library(CURL_LIBRARY curl) +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) target_link_libraries( vnode @@ -162,10 +162,10 @@ target_link_libraries( # s3 cos_c_sdk - apr-1 - aprutil-1 - mxml - curl + ${APR_UTIL_LIBRARY} + ${APR_LIBRARY} + ${MINIXML_LIBRARY} + ${CURL_LIBRARY} ) IF (TD_GRANT) From 56b348abf2e821fd58834566ff878ffc988bbf34 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 13:15:25 +0800 Subject: [PATCH 20/78] curl: use tarball --- cmake/curl_CMakeLists.txt.in | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index cec4dda004..cbfe939219 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -1,7 +1,10 @@ # curl ExternalProject_Add(curl - GIT_REPOSITORY https://github.com/curl/curl.git - GIT_TAG curl-7_88_1 + URL https://curl.se/download/curl-8.2.1.tar.gz + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/curl/curl.git + #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ From 376a2c2520dc3c5887bbb10f17a143ba9aeec407 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 13:31:18 +0800 Subject: [PATCH 21/78] curl: with openssl building --- cmake/curl_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index cbfe939219..e411cd893c 100644 
--- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -7,7 +7,7 @@ ExternalProject_Add(curl #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-openssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" From b08d5b4d42de9ae814c695478d7eee9c28616573 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 14:02:10 +0800 Subject: [PATCH 22/78] mxml: add include dir to vnode --- source/dnode/vnode/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 0612f924f5..6c107e0a22 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -195,6 +195,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" + PUBLIC "${CMAKE_BINARY_DIR}/build/include" ) if(${BUILD_TEST}) From 30cbbc425fdf2a17574c77bea6846ca15af6ba75 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 14:31:17 +0800 Subject: [PATCH 23/78] cos: new update command to build every cmake --- cmake/apr-util_CMakeLists.txt.in | 1 + cmake/apr_CMakeLists.txt.in | 5 +---- cmake/curl_CMakeLists.txt.in | 3 ++- cmake/mxml_CMakeLists.txt.in | 1 + 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index b81745aeef..ee30787cb6 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -9,6 +9,7 @@ ExternalProject_Add(aprutil-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" #BINARY_DIR "" BUILD_IN_SOURCE TRUE + UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/apr_CMakeLists.txt.in 
b/cmake/apr_CMakeLists.txt.in index 037c2ee6cc..fa124de62c 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -7,11 +7,8 @@ ExternalProject_Add(apr-1 #GIT_REPOSITORY https://github.com/apache/apr.git #GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" - #BINARY_DIR "${CMAKE_BINARY_DIR}/build" BUILD_IN_SOURCE TRUE - #CONFIGURE_COMMAND "" - #BUILD_COMMAND "" - #INSTALL_COMMAND "" + UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index e411cd893c..47c4fd72a1 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -7,7 +7,8 @@ ExternalProject_Add(curl #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-openssl + UPDATE_COMMAND "" + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 994aa6e2cb..12c9ea7d89 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -5,6 +5,7 @@ ExternalProject_Add(mxml SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" #BINARY_DIR "" BUILD_IN_SOURCE TRUE + UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ BUILD_COMMAND make INSTALL_COMMAND make install From 5d0edcd17b16bc557087a7f148d9f6b2d69d39d4 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 15:41:23 +0800 Subject: [PATCH 24/78] cos: use /usr/local as prefix instead of debug/build --- cmake/apr-util_CMakeLists.txt.in | 3 ++- cmake/apr_CMakeLists.txt.in | 3 ++- cmake/curl_CMakeLists.txt.in | 3 ++- cmake/mxml_CMakeLists.txt.in | 3 ++- contrib/CMakeLists.txt | 4 ++-- 5 files changed, 10 insertions(+), 6 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in 
b/cmake/apr-util_CMakeLists.txt.in index ee30787cb6..fc4f92858c 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -10,7 +10,8 @@ ExternalProject_Add(aprutil-1 #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build + CONFIGURE_COMMAND ./configure --with-apr=/usr/local/ BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index fa124de62c..57e2014c31 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -9,7 +9,8 @@ ExternalProject_Add(apr-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 47c4fd72a1..fcd16a0518 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -8,7 +8,8 @@ ExternalProject_Add(curl SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl + CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 12c9ea7d89..cdd3e5b301 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,8 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure 
--prefix=${CMAKE_BINARY_DIR}/build/ + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 058b2cf042..507928cbe9 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -124,7 +124,7 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) - file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + #file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -376,7 +376,7 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) + #set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) From 93ce558abf20e429980a6901344098bf16a0494c Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 15:49:34 +0800 Subject: [PATCH 25/78] apu: fix with-apr config --- cmake/apr-util_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index fc4f92858c..96a8b3ef75 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build - CONFIGURE_COMMAND ./configure --with-apr=/usr/local/ + CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" From bc64d5f769f6d5fdb7baf7b43e81ef2708fa04b8 Mon Sep 17 00:00:00 2001 From: 
Minglei Jin Date: Wed, 9 Aug 2023 16:53:19 +0800 Subject: [PATCH 26/78] cos: use ~/local as prefix for building --- cmake/apr-util_CMakeLists.txt.in | 4 ++-- cmake/apr_CMakeLists.txt.in | 4 ++-- cmake/curl_CMakeLists.txt.in | 4 ++-- cmake/mxml_CMakeLists.txt.in | 4 ++-- contrib/CMakeLists.txt | 4 ++-- tests/parallel_test/container_build.sh | 1 + 6 files changed, 11 insertions(+), 10 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 96a8b3ef75..1ae52c69af 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -10,8 +10,8 @@ ExternalProject_Add(aprutil-1 #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build - CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ --with-apr=$ENV{HOME}/local + #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 57e2014c31..1df68919ae 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -9,8 +9,8 @@ ExternalProject_Add(apr-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ - CONFIGURE_COMMAND ./configure + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ + #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index fcd16a0518..b09e85b9b2 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -8,8 +8,8 @@ ExternalProject_Add(curl SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl - 
CONFIGURE_COMMAND ./configure --without-ssl + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local --without-ssl + #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index cdd3e5b301..33dc48ab4e 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,8 +6,8 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ - CONFIGURE_COMMAND ./configure + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local + #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 507928cbe9..cc93226d68 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -124,7 +124,7 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) - #file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + file(MAKE_DIRECTORY $ENV{HOME}/local/) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -376,7 +376,7 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - #set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) + set(CMAKE_PREFIX_PATH $ENV{HOME}/local) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 5ae061072a..699a4667dd 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -88,6 +88,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v 
/root/.cache/go-build:/root/.cache/go-build \ + -v /root/local:/root/local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From 1ce3ef7fd52c3b86c2a2a68481cb10fa9c4b2602 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 17:02:16 +0800 Subject: [PATCH 27/78] cos: fix local/include directory --- contrib/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index cc93226d68..9feefa6947 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -379,7 +379,7 @@ if(${BUILD_WITH_COS}) set(CMAKE_PREFIX_PATH $ENV{HOME}/local) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) - INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) + INCLUDE_DIRECTORIES($ENV{HOME}/local/include) set(CMAKE_BUILD_TYPE debug) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) From 4911b6c8558beb4948fef3dbd1d6c46dfe630f81 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 17:07:55 +0800 Subject: [PATCH 28/78] container_build: use local as install dir --- tests/parallel_test/container_build.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 699a4667dd..8de8f377fd 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -60,6 +60,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ + -v /root/local:/root/local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v 
${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From a5ccc3e8aa39f91a85931129dffc6aeea8e34767 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 17:53:25 +0800 Subject: [PATCH 29/78] apr: make apr, apu, curl build always --- cmake/apr-util_CMakeLists.txt.in | 3 ++- cmake/apr_CMakeLists.txt.in | 4 +++- cmake/curl_CMakeLists.txt.in | 3 ++- cmake/mxml_CMakeLists.txt.in | 2 +- contrib/CMakeLists.txt | 15 --------------- 5 files changed, 8 insertions(+), 19 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 1ae52c69af..c64e4ffcdb 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -9,7 +9,8 @@ ExternalProject_Add(aprutil-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" #BINARY_DIR "" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ --with-apr=$ENV{HOME}/local #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 1df68919ae..bae8cfe0a6 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -8,7 +8,9 @@ ExternalProject_Add(apr-1 #GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + UPDATE_DISCONNECTED TRUE + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ #CONFIGURE_COMMAND ./configure BUILD_COMMAND make diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index b09e85b9b2..27457ffdbc 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -7,7 +7,8 @@ ExternalProject_Add(curl #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" 
CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local --without-ssl #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 33dc48ab4e..f9b7e8e642 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -5,7 +5,7 @@ ExternalProject_Add(mxml SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" #BINARY_DIR "" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local #CONFIGURE_COMMAND ./configure BUILD_COMMAND make diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 9feefa6947..3fb7b93abe 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -162,21 +162,6 @@ if(${BUILD_GEOS}) cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif() -# SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=unused-function") -# include(ExternalProject) -# ExternalProject_Add(mxml -# GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git -# GIT_TAG release-2.10 -# SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" -# #BINARY_DIR "" -# BUILD_IN_SOURCE TRUE -# CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build -# BUILD_COMMAND make -# INSTALL_COMMAND make install -# TEST_COMMAND "" -# ) - - # download dependencies configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . 
From a853d9d40cd9c937417d90692ad62c58e020e486 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 18:21:55 +0800 Subject: [PATCH 30/78] mxml: use ~/local/include --- source/dnode/vnode/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 6c107e0a22..e6af282d10 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -195,7 +195,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" - PUBLIC "${CMAKE_BINARY_DIR}/build/include" + PUBLIC "$ENV{HOME}/local/include" ) if(${BUILD_TEST}) From ca5571d0d6fb05b622e93d90bc3d5f30077ad1ec Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 18:48:59 +0800 Subject: [PATCH 31/78] config: fix default configs --- source/common/src/tglobal.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index fbc98715f0..91ab9f62d5 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -266,6 +266,9 @@ int32_t taosSetTfsCfg(SConfig *pCfg); int32_t taosSetS3Cfg(SConfig *pCfg) { tstrncpy(tsS3AccessKey, cfgGetItem(pCfg, "s3Accesskey")->str, TSDB_FQDN_LEN); + if (tsS3AccessKey[0] == '<') { + return 0; + } char *colon = strchr(tsS3AccessKey, ':'); if (!colon) { uError("invalid access key:%s", tsS3AccessKey); From cd63e814500cdd9138e674831e0bbef664fc2b75 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:18:12 +0800 Subject: [PATCH 32/78] cos: separate building phase for apr & apr-util --- contrib/CMakeLists.txt | 42 +++++++++++++++++++++++++------ source/dnode/vnode/CMakeLists.txt | 2 +- 2 files changed, 36 insertions(+), 8 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 3fb7b93abe..e8f6c98efe 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -6,6 +6,35 @@ 
function(cat IN_FILE OUT_FILE) file(APPEND ${OUT_FILE} "${CONTENTS}") endfunction(cat IN_FILE OUT_FILE) +set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") +configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + +if(${BUILD_WITH_COS}) + file(MAKE_DIRECTORY $ENV{HOME}/.cos-local/) + cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) +endif(${BUILD_WITH_COS}) + +configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") +execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") +execute_process(COMMAND "${CMAKE_COMMAND}" --build . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") + +set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2") +configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) + +if(${BUILD_WITH_COS}) + cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) +endif(${BUILD_WITH_COS}) + +configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") +execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") +execute_process(COMMAND "${CMAKE_COMMAND}" --build . 
+ WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") + set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -124,11 +153,10 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) - file(MAKE_DIRECTORY $ENV{HOME}/local/) - cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif(${BUILD_WITH_COS}) @@ -361,10 +389,10 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - set(CMAKE_PREFIX_PATH $ENV{HOME}/local) + set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) - INCLUDE_DIRECTORIES($ENV{HOME}/local/include) + INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local/include) set(CMAKE_BUILD_TYPE debug) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index e6af282d10..3cfcc9b716 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -195,7 +195,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" - PUBLIC "$ENV{HOME}/local/include" + PUBLIC "$ENV{HOME}/.cos-local/include" ) if(${BUILD_TEST}) From bb0b80e42df5c0c3b6af41ccdbd4767ccf3684de Mon Sep 17 00:00:00 
2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:24:51 +0800 Subject: [PATCH 33/78] cmake: use .cos-local as prebuilt directory --- cmake/apr-util_CMakeLists.txt.in | 2 +- cmake/apr_CMakeLists.txt.in | 2 +- cmake/curl_CMakeLists.txt.in | 2 +- cmake/mxml_CMakeLists.txt.in | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index c64e4ffcdb..6172be380e 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ --with-apr=$ENV{HOME}/local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ --with-apr=$ENV{HOME}/.cos-local #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index bae8cfe0a6..538b45a7f9 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(apr-1 UPDATE_DISCONNECTED TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 27457ffdbc..1d9d028848 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local --without-ssl + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --without-ssl #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 
f9b7e8e642..87b126d8d3 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,7 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install From f3b56a0687e2b4f0127e3eff22787783f4a59154 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:28:23 +0800 Subject: [PATCH 34/78] apu: fix apr-util building --- contrib/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index e8f6c98efe..d20b205e69 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -26,7 +26,7 @@ set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) if(${BUILD_WITH_COS}) - cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) endif(${BUILD_WITH_COS}) configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") From b739a422e33da2c44842b9afb4674cd1ded0589d Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:31:11 +0800 Subject: [PATCH 35/78] container-build: use .cos-local for prebuilt building --- tests/parallel_test/container_build.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 8de8f377fd..62254984a9 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -60,7 +60,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/local:/root/local \ + -v 
/root/.cos-local:/root/.cos-local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ @@ -89,7 +89,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/local:/root/local \ + -v /root/.cos-local:/root/.cos-local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From c04ada3573e86ddf47a5e5fc67f79557c56b5c66 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 15:28:22 +0800 Subject: [PATCH 36/78] cos: link with static libs --- source/dnode/vnode/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 3cfcc9b716..c036fbc54a 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,6 +135,7 @@ else() endif() endif() +set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) find_library(MINIXML_LIBRARY mxml) From b2e615d4e70a78a8ef80bf4cf3e4ce7af6c9381e Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Thu, 10 Aug 2023 17:30:01 +0800 Subject: [PATCH 37/78] enhance: tag scan cursor based block --- include/libs/executor/storageapi.h | 14 +- source/dnode/vnode/src/inc/vnodeInt.h | 2 +- source/dnode/vnode/src/meta/metaQuery.c | 12 +- source/dnode/vnode/src/vnd/vnodeInitApi.c | 4 + source/libs/executor/inc/executil.h | 2 + source/libs/executor/inc/executorInt.h | 4 + 
source/libs/executor/inc/operator.h | 2 +- source/libs/executor/src/executil.c | 4 +- source/libs/executor/src/operator.c | 2 +- source/libs/executor/src/scanoperator.c | 271 +++++++++++++++++++++- 10 files changed, 298 insertions(+), 19 deletions(-) diff --git a/include/libs/executor/storageapi.h b/include/libs/executor/storageapi.h index 773f373a2d..724d6638db 100644 --- a/include/libs/executor/storageapi.h +++ b/include/libs/executor/storageapi.h @@ -98,6 +98,16 @@ typedef struct SMTbCursor { int8_t paused; } SMTbCursor; +typedef struct SMCtbCursor { + SMeta *pMeta; + void *pCur; + tb_uid_t suid; + void *pKey; + void *pVal; + int kLen; + int vLen; +} SMCtbCursor; + typedef struct SRowBuffPos { void* pRowBuff; void* pKey; @@ -278,13 +288,15 @@ typedef struct SStoreMeta { void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables, int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) & // metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta); - int64_t (*getNumOfRowsInMem)(void* pVnode); /** int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list); int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg); int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list); */ + SMCtbCursor* (*openCtbCursor)(void *pVnode, tb_uid_t uid, int lock); + void (*closeCtbCursor)(SMCtbCursor *pCtbCur, int lock); + tb_uid_t (*ctbCursorNext)(SMCtbCursor* pCur); } SStoreMeta; typedef struct SStoreMetaReader { diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index cd7704940b..e3b2d3e41e 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -167,7 +167,7 @@ int metaAddIndexToSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq); int metaDropIndexFromSTable(SMeta* pMeta, int64_t version, SDropIndexReq* pReq); int64_t metaGetTimeSeriesNum(SMeta* 
pMeta); -SMCtbCursor* metaOpenCtbCursor(SMeta* pMeta, tb_uid_t uid, int lock); +SMCtbCursor* metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock); void metaCloseCtbCursor(SMCtbCursor* pCtbCur, int lock); tb_uid_t metaCtbCursorNext(SMCtbCursor* pCtbCur); SMStbCursor* metaOpenStbCursor(SMeta* pMeta, tb_uid_t uid); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index c26bb45c2b..31c7bc8500 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -408,17 +408,9 @@ _err: return NULL; } -struct SMCtbCursor { - SMeta *pMeta; - TBC *pCur; - tb_uid_t suid; - void *pKey; - void *pVal; - int kLen; - int vLen; -}; -SMCtbCursor *metaOpenCtbCursor(SMeta *pMeta, tb_uid_t uid, int lock) { +SMCtbCursor *metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock) { + SMeta* pMeta = ((SVnode*)pVnode)->pMeta; SMCtbCursor *pCtbCur = NULL; SCtbIdxKey ctbIdxKey; int ret = 0; diff --git a/source/dnode/vnode/src/vnd/vnodeInitApi.c b/source/dnode/vnode/src/vnd/vnodeInitApi.c index 5c8d563d73..dca8dd271c 100644 --- a/source/dnode/vnode/src/vnd/vnodeInitApi.c +++ b/source/dnode/vnode/src/vnd/vnodeInitApi.c @@ -96,6 +96,10 @@ void initMetadataAPI(SStoreMeta* pMeta) { pMeta->metaGetCachedTbGroup = metaGetCachedTbGroup; pMeta->metaPutTbGroupToCache = metaPutTbGroupToCache; + + pMeta->openCtbCursor = metaOpenCtbCursor; + pMeta->closeCtbCursor = metaCloseCtbCursor; + pMeta->ctbCursorNext = metaCtbCursorNext; } void initTqAPI(SStoreTqReader* pTq) { diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index 33c9d845b9..f273f63770 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -190,4 +190,6 @@ void printDataBlock(SSDataBlock* pBlock, const char* flag); void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order); void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery); 
+SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, + SStorageAPI* pStorageAPI); #endif // TDENGINE_EXECUTIL_H diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index fbca5e29f9..cadf367481 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -259,6 +259,10 @@ typedef struct STagScanInfo { SLimitNode* pSlimit; SReadHandle readHandle; STableListInfo* pTableListInfo; + uint64_t suid; + void* pCtbCursor; + SNode* pTagCond; + SNode* pTagIndexCond; } STagScanInfo; typedef enum EStreamScanMode { diff --git a/source/libs/executor/inc/operator.h b/source/libs/executor/inc/operator.h index e6c3405d7f..38cefc1cc5 100644 --- a/source/libs/executor/inc/operator.h +++ b/source/libs/executor/inc/operator.h @@ -81,7 +81,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo); +SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SNode* pTagCond, SNode*pTagIndexCond, SExecTaskInfo* pTaskInfo); SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode, const char* pUser, SExecTaskInfo* pTaskInfo); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index aa0c7945b0..5bb8f8a38b 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -47,8 +47,6 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* p static int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, 
SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo, uint8_t* digest, const char* idstr, SStorageAPI* pStorageAPI); -static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, - SStorageAPI* pStorageAPI); static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; } static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->offset; } @@ -846,7 +844,7 @@ static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, S return -1; } -static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, +SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, SStorageAPI* pStorageAPI) { SSDataBlock* pResBlock = createDataBlock(); if (pResBlock == NULL) { diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 8ddcc8fd15..0fc1b77b73 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -380,7 +380,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR return NULL; } - pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTaskInfo); + pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN == type) { SBlockDistScanPhysiNode* pBlockNode = (SBlockDistScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 71b0747be8..24ed717c8a 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2688,6 +2688,271 @@ static void doTagScanOneTable(SOperatorInfo* pOperator, const SSDataBlock* pRes, } } 
+static void tagScanFreeUidTag(void* p) { + STUidTagInfo* pInfo = p; + if (pInfo->pTagVal != NULL) { + taosMemoryFree(pInfo->pTagVal); + } +} + +static int32_t tagScanCreateResultData(SDataType* pType, int32_t numOfRows, SScalarParam* pParam) { + SColumnInfoData* pColumnData = taosMemoryCalloc(1, sizeof(SColumnInfoData)); + if (pColumnData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return terrno; + } + + pColumnData->info.type = pType->type; + pColumnData->info.bytes = pType->bytes; + pColumnData->info.scale = pType->scale; + pColumnData->info.precision = pType->precision; + + int32_t code = colInfoDataEnsureCapacity(pColumnData, numOfRows, true); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + taosMemoryFree(pColumnData); + return terrno; + } + + pParam->columnData = pColumnData; + pParam->colAlloced = true; + return TSDB_CODE_SUCCESS; +} + +typedef struct STagScanFilterContext { + SHashObj* colHash; + int32_t index; + SArray* cInfoList; +} STagScanFilterContext; + +static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) { + SColumnNode* pSColumnNode = NULL; + if (QUERY_NODE_COLUMN == nodeType((*pNode))) { + pSColumnNode = *(SColumnNode**)pNode; + } else if (QUERY_NODE_FUNCTION == nodeType((*pNode))) { + SFunctionNode* pFuncNode = *(SFunctionNode**)(pNode); + if (pFuncNode->funcType == FUNCTION_TYPE_TBNAME) { + pSColumnNode = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == pSColumnNode) { + return DEAL_RES_ERROR; + } + pSColumnNode->colId = -1; + pSColumnNode->colType = COLUMN_TYPE_TBNAME; + pSColumnNode->node.resType.type = TSDB_DATA_TYPE_VARCHAR; + pSColumnNode->node.resType.bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE; + nodesDestroyNode(*pNode); + *pNode = (SNode*)pSColumnNode; + } else { + return DEAL_RES_CONTINUE; + } + } else { + return DEAL_RES_CONTINUE; + } + + STagScanFilterContext* pCtx = (STagScanFilterContext*)pContext; + void* data = taosHashGet(pCtx->colHash, &pSColumnNode->colId, 
sizeof(pSColumnNode->colId)); + if (!data) { + taosHashPut(pCtx->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId), pNode, sizeof((*pNode))); + pSColumnNode->slotId = pCtx->index++; + SColumnInfo cInfo = {.colId = pSColumnNode->colId, + .type = pSColumnNode->node.resType.type, + .bytes = pSColumnNode->node.resType.bytes}; + taosArrayPush(pCtx->cInfoList, &cInfo); + } else { + SColumnNode* col = *(SColumnNode**)data; + pSColumnNode->slotId = col->slotId; + } + + return DEAL_RES_CONTINUE; +} + + +static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aUidTagIdxs, void* pVnode, SStorageAPI* pAPI) { + int32_t code = 0; + int32_t numOfTables = taosArrayGetSize(aUidTags); + STagScanFilterContext ctx = {0}; + ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); + ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); + + nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&ctx); + + SSDataBlock* pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, aUidTags, pVnode, pAPI); + if (pResBlock == NULL) { + + } + + SArray* pBlockList = taosArrayInit(1, POINTER_BYTES); + taosArrayPush(pBlockList, &pResBlock); + SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; + + SScalarParam output = {0}; + code = tagScanCreateResultData(&type, numOfTables, &output); + if (code != TSDB_CODE_SUCCESS) { + + } + + code = scalarCalculate(pTagCond, pBlockList, &output); + if (code != TSDB_CODE_SUCCESS) { + } + + bool* result = (bool*)output.columnData->pData; + for (int32_t i = 0 ; i < numOfTables; ++i) { + if (result[i]) { + taosArrayPush(aUidTagIdxs, &i); + } + } + + taosHashCleanup(ctx.colHash); + taosArrayDestroy(ctx.cInfoList); + blockDataDestroy(pResBlock); + taosArrayDestroy(pBlockList); + colDataDestroy(output.columnData); + taosMemoryFreeClear(output.columnData); +} + +static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo* 
pExprInfo, SColumnInfoData* pColInfo, int rowIndex, const SStorageAPI* pAPI, void* pVnode) { + if (fmIsScanPseudoColumnFunc(pExprInfo->pExpr->_function.functionId)) { // tbname + char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(str, "zsl"); + // if (pUidTagInfo->name != NULL) { + // STR_TO_VARSTR(str, pUidTagInfo->name); + // } else { // name is not retrieved during filter + // pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); + // } + + colDataSetVal(pColInfo, rowIndex, str, false); + } else { + STagVal tagVal = {0}; + tagVal.cid = pExprInfo->base.pParam[0].pCol->colId; + if (pUidTagInfo->pTagVal == NULL) { + colDataSetNULL(pColInfo, rowIndex); + } else { + const char* p = pAPI->metaFn.extractTagVal(pUidTagInfo->pTagVal, pColInfo->info.type, &tagVal); + + if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { + colDataSetNULL(pColInfo, rowIndex); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { + colDataSetVal(pColInfo, rowIndex, p, false); + } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { + char* tmp = taosMemoryMalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1); + varDataSetLen(tmp, tagVal.nData); + memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); + colDataSetVal(pColInfo, rowIndex, tmp, false); + taosMemoryFree(tmp); + } else { + colDataSetVal(pColInfo, rowIndex, (const char*)&tagVal.i64, false); + } + } + } +} + +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aUidTagIdxs, + SStorageAPI* pAPI) { + STagScanInfo* pInfo = pOperator->info; + SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; + + for (int i = 0; i < taosArrayGetSize(aUidTagIdxs); ++i) { + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, *(int32_t*)taosArrayGet(aUidTagIdxs, i)); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + 
tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); + } + } + return 0; +} + +#if 0 +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, + SStorageAPI* pAPI) { + STagScanInfo* pInfo = pOperator->info; + SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; + + int32_t nTbls = taosArrayGetSize(aUidTags); + for (int i = 0; i < nTbls; ++i) { + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, i); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + + // refactor later + if (fmIsScanPseudoColumnFunc(pExprInfo[j].pExpr->_function.functionId)) { + char str[512]; + + STR_TO_VARSTR(str, "zsl"); + colDataSetVal(pDst, (i), str, false); + } else { // it is a tag value + STagVal val = {0}; + val.cid = pExprInfo[j].base.pParam[0].pCol->colId; + const char* p = pAPI->metaFn.extractTagVal(pUidTagInfo->pTagVal, pDst->info.type, &val); + + char* data = NULL; + if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) { + data = tTagValToData((const STagVal*)p, false); + } else { + data = (char*)p; + } + colDataSetVal(pDst, i, data, + (data == NULL) || (pDst->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data))); + + if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL && IS_VAR_DATA_TYPE(((const STagVal*)p)->type) && + data != NULL) { + taosMemoryFree(data); + } + } + } + } + return 0; +} +#endif + + +static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SStorageAPI* pAPI = &pTaskInfo->storageAPI; + + STagScanInfo* pInfo = pOperator->info; + SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; + SSDataBlock* pRes = pInfo->pRes; + blockDataCleanup(pRes); + int32_t count = 0; + + if (pInfo->pCtbCursor == NULL) { + pInfo->pCtbCursor = 
pAPI->metaFn.openCtbCursor(pInfo->readHandle.vnode, pInfo->suid, 1); + } + SArray* aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); + SArray* aUidTagIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + while (1) { + while (count < pOperator->resultInfo.capacity) { + SMCtbCursor* pCur = pInfo->pCtbCursor; + tb_uid_t uid = pAPI->metaFn.ctbCursorNext(pInfo->pCtbCursor); + if (uid == 0) { + break; + } + STUidTagInfo info = {.uid = uid, .pTagVal = pCur->pVal}; + info.pTagVal = taosMemoryMalloc(pCur->vLen); + memcpy(info.pTagVal, pCur->pVal, pCur->vLen); + taosArrayPush(aUidTags, &info); + } + + int32_t numTables = taosArrayGetSize(aUidTags); + if (numTables != 0 && pInfo->pTagCond != NULL) { + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aUidTagIdxs, pAPI); + } + tagScanFillResultBlock(pOperator, pRes, aUidTags, aUidTagIdxs, pAPI); + if (taosArrayGetSize(aUidTagIdxs) != 0) { + break; + } + taosArrayClearEx(aUidTags, tagScanFreeUidTag); + taosArrayClear(aUidTagIdxs); + } + taosArrayDestroy(aUidTagIdxs); + taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); + pOperator->resultInfo.totalRows += count; + return (pRes->info.rows == 0) ? 
NULL : pInfo->pRes; +} + static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -2753,7 +3018,7 @@ static void destroyTagScanOperatorInfo(void* param) { } SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, - STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) { + STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, SExecTaskInfo* pTaskInfo) { STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -2774,7 +3039,8 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi if (code != TSDB_CODE_SUCCESS) { goto _error; } - + pInfo->pTagCond = pTagCond; + pInfo->pTagIndexCond = pTagIndexCond; pInfo->pTableListInfo = pTableListInfo; pInfo->pRes = createDataBlockFromDescNode(pDescNode); pInfo->readHandle = *pReadHandle; @@ -2789,6 +3055,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doTagScan, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); + pInfo->suid = pPhyNode->suid; return pOperator; _error: From 20f5e2af5b5f6742466e82611f2e54278af6d776 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Thu, 10 Aug 2023 17:40:54 +0800 Subject: [PATCH 38/78] continue coding and save work --- source/libs/executor/src/scanoperator.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 24ed717c8a..42c488edbe 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2913,7 +2913,6 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { SStorageAPI* pAPI = &pTaskInfo->storageAPI; STagScanInfo* pInfo = pOperator->info; - SExprInfo* pExprInfo = 
&pOperator->exprSupp.pExprInfo[0]; SSDataBlock* pRes = pInfo->pRes; blockDataCleanup(pRes); int32_t count = 0; @@ -2941,6 +2940,8 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aUidTagIdxs, pAPI); } tagScanFillResultBlock(pOperator, pRes, aUidTags, aUidTagIdxs, pAPI); + count = taosArrayGetSize(aUidTagIdxs); + if (taosArrayGetSize(aUidTagIdxs) != 0) { break; } From 7085d6bc11d7c8fb7ef18258273bccdee5817337 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 18:49:17 +0800 Subject: [PATCH 39/78] mxml: disable shared lib --- cmake/mxml_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 87b126d8d3..7377f81c33 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,7 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --enable-shared=no #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install From 4513acfee925eaebf27b1486561afe3afab0ffe4 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 09:54:54 +0800 Subject: [PATCH 40/78] cos: use static libs for mxml, apr, apu, curl --- cmake/apr-util_CMakeLists.txt.in | 2 +- cmake/apr_CMakeLists.txt.in | 2 +- cmake/curl_CMakeLists.txt.in | 2 +- cmake/mxml_CMakeLists.txt.in | 2 +- contrib/CMakeLists.txt | 7 ++++--- source/dnode/vnode/CMakeLists.txt | 2 +- tests/parallel_test/container_build.sh | 4 ++-- 7 files changed, 11 insertions(+), 10 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 6172be380e..d98a381005 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 
BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ --with-apr=$ENV{HOME}/.cos-local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 538b45a7f9..18c4eb62a1 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(apr-1 UPDATE_DISCONNECTED TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --enable-shared=no #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 1d9d028848..5f1efc1e5a 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --without-ssl + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 7377f81c33..9dcb5df665 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,7 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --enable-shared=no + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --enable-shared=no #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 
d20b205e69..452192a288 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -10,7 +10,7 @@ set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) if(${BUILD_WITH_COS}) - file(MAKE_DIRECTORY $ENV{HOME}/.cos-local/) + file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) @@ -389,10 +389,11 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local) + set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) - INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local/include) + INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include) + MESSAGE("$ENV{HOME}/.cos-local.1/include") set(CMAKE_BUILD_TYPE debug) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index c036fbc54a..684134c2d6 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -196,7 +196,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" - PUBLIC "$ENV{HOME}/.cos-local/include" + PUBLIC "$ENV{HOME}/.cos-local.1/include" ) if(${BUILD_TEST}) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 62254984a9..f5e426057e 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -60,7 +60,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/.cos-local:/root/.cos-local \ + -v 
/root/.cos-local.1:/root/.cos-local.1 \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ @@ -89,7 +89,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/.cos-local:/root/.cos-local \ + -v /root/.cos-local.1:/root/.cos-local.1 \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From 104ead6783d3f5225a8d53c8df07de46c2ac2f9b Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 10:06:42 +0800 Subject: [PATCH 41/78] apu: fix apr location --- cmake/apr-util_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index d98a381005..5a68020dd7 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1 #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install From 1d089fe085bd00e75ce389311a8e362e0f9e61a2 Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 11 Aug 2023 11:34:57 +0800 Subject: [PATCH 42/78] fix: dup read lock on windows --- source/dnode/vnode/src/meta/metaQuery.c | 23 ++++++++++++----------- 
source/libs/executor/src/scanoperator.c | 2 +- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index c26bb45c2b..9c5ab2fbb4 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -1432,37 +1432,38 @@ int32_t metaGetInfo(SMeta *pMeta, int64_t uid, SMetaInfo *pInfo, SMetaReader *pR int32_t code = 0; void *pData = NULL; int nData = 0; - int lock = 0; + bool lock = false; - metaRLock(pMeta); + if (pReader && !(pReader->flags & META_READER_NOLOCK)) { + lock = true; + } + + if(!lock) metaRLock(pMeta); // search cache if (metaCacheGet(pMeta, uid, pInfo) == 0) { - metaULock(pMeta); + if(!lock) metaULock(pMeta); goto _exit; } // search TDB if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData) < 0) { // not found - metaULock(pMeta); + if(!lock) metaULock(pMeta); code = TSDB_CODE_NOT_FOUND; goto _exit; } - metaULock(pMeta); + if(!lock) metaULock(pMeta); pInfo->uid = uid; pInfo->suid = ((SUidIdxVal *)pData)->suid; pInfo->version = ((SUidIdxVal *)pData)->version; pInfo->skmVer = ((SUidIdxVal *)pData)->skmVer; - if (pReader != NULL) { - lock = !(pReader->flags & META_READER_NOLOCK); - if (lock) { - metaULock(pReader->pMeta); - // metaReaderReleaseLock(pReader); - } + if (lock) { + metaULock(pReader->pMeta); + // metaReaderReleaseLock(pReader); } // upsert the cache metaWLock(pMeta); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 71b0747be8..9a836caa8c 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -504,7 +504,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int // 1. 
check if it is existed in meta cache if (pCache == NULL) { - pHandle->api.metaReaderFn.initReader(&mr, pHandle->vnode, META_READER_NOLOCK, &pHandle->api.metaFn); + pHandle->api.metaReaderFn.initReader(&mr, pHandle->vnode, 0, &pHandle->api.metaFn); code = pHandle->api.metaReaderFn.getEntryGetUidCache(&mr, pBlock->info.id.uid); if (code != TSDB_CODE_SUCCESS) { // when encounter the TSDB_CODE_PAR_TABLE_NOT_EXIST error, we proceed. From 64a1636dd20bfafafd4faeccde8a964e7a9ade4f Mon Sep 17 00:00:00 2001 From: kailixu Date: Fri, 11 Aug 2023 12:54:45 +0800 Subject: [PATCH 43/78] chore: trigger CI --- source/dnode/vnode/src/meta/metaQuery.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 9c5ab2fbb4..41fc3b3d7e 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -1432,10 +1432,10 @@ int32_t metaGetInfo(SMeta *pMeta, int64_t uid, SMetaInfo *pInfo, SMetaReader *pR int32_t code = 0; void *pData = NULL; int nData = 0; - bool lock = false; + int lock = 0; if (pReader && !(pReader->flags & META_READER_NOLOCK)) { - lock = true; + lock = 1; } if(!lock) metaRLock(pMeta); From 7c39bc989083ce501dd6df5bd980b4edaab07057 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Fri, 11 Aug 2023 13:50:41 +0800 Subject: [PATCH 44/78] fix: some minor modifications --- source/libs/executor/inc/executorInt.h | 1 + source/libs/executor/src/scanoperator.c | 77 ++++++++++++++----------- 2 files changed, 44 insertions(+), 34 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index cadf367481..2b25feabb3 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -263,6 +263,7 @@ typedef struct STagScanInfo { void* pCtbCursor; SNode* pTagCond; SNode* pTagIndexCond; + SStorageAPI* pStorageAPI; } STagScanInfo; typedef enum EStreamScanMode { diff --git 
a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 42c488edbe..5e0eb71c13 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2767,9 +2767,10 @@ static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) { } -static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aUidTagIdxs, void* pVnode, SStorageAPI* pAPI) { +static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode, SStorageAPI* pAPI) { int32_t code = 0; int32_t numOfTables = taosArrayGetSize(aUidTags); + STagScanFilterContext ctx = {0}; ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); @@ -2777,48 +2778,42 @@ static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aU nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&ctx); SSDataBlock* pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, aUidTags, pVnode, pAPI); - if (pResBlock == NULL) { - - } SArray* pBlockList = taosArrayInit(1, POINTER_BYTES); taosArrayPush(pBlockList, &pResBlock); SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; SScalarParam output = {0}; - code = tagScanCreateResultData(&type, numOfTables, &output); - if (code != TSDB_CODE_SUCCESS) { + tagScanCreateResultData(&type, numOfTables, &output); - } - - code = scalarCalculate(pTagCond, pBlockList, &output); - if (code != TSDB_CODE_SUCCESS) { - } + scalarCalculate(pTagCond, pBlockList, &output); bool* result = (bool*)output.columnData->pData; for (int32_t i = 0 ; i < numOfTables; ++i) { if (result[i]) { - taosArrayPush(aUidTagIdxs, &i); + taosArrayPush(aFilterIdxs, &i); } } - taosHashCleanup(ctx.colHash); - taosArrayDestroy(ctx.cInfoList); - blockDataDestroy(pResBlock); - taosArrayDestroy(pBlockList); colDataDestroy(output.columnData); 
taosMemoryFreeClear(output.columnData); + + blockDataDestroy(pResBlock); + taosArrayDestroy(pBlockList); + + taosHashCleanup(ctx.colHash); + taosArrayDestroy(ctx.cInfoList); } static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo* pExprInfo, SColumnInfoData* pColInfo, int rowIndex, const SStorageAPI* pAPI, void* pVnode) { if (fmIsScanPseudoColumnFunc(pExprInfo->pExpr->_function.functionId)) { // tbname char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; +// if (pUidTagInfo->name != NULL) { +// STR_TO_VARSTR(str, pUidTagInfo->name); +// } else { // name is not retrieved during filter +// pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); +// } STR_TO_VARSTR(str, "zsl"); - // if (pUidTagInfo->name != NULL) { - // STR_TO_VARSTR(str, pUidTagInfo->name); - // } else { // name is not retrieved during filter - // pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); - // } colDataSetVal(pColInfo, rowIndex, str, false); } else { @@ -2846,13 +2841,15 @@ static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo } } -static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aUidTagIdxs, +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aFilterIdxs, SStorageAPI* pAPI) { STagScanInfo* pInfo = pOperator->info; SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; - for (int i = 0; i < taosArrayGetSize(aUidTagIdxs); ++i) { - STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, *(int32_t*)taosArrayGet(aUidTagIdxs, i)); + size_t szTables = taosArrayGetSize(aFilterIdxs); + for (int i = 0; i < szTables; ++i) { + int32_t idx = *(int32_t*)taosArrayGet(aFilterIdxs, i); + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, idx); for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); 
tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); @@ -2920,8 +2917,10 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { if (pInfo->pCtbCursor == NULL) { pInfo->pCtbCursor = pAPI->metaFn.openCtbCursor(pInfo->readHandle.vnode, pInfo->suid, 1); } + SArray* aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); - SArray* aUidTagIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + SArray* aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + while (1) { while (count < pOperator->resultInfo.capacity) { SMCtbCursor* pCur = pInfo->pCtbCursor; @@ -2936,20 +2935,26 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { } int32_t numTables = taosArrayGetSize(aUidTags); - if (numTables != 0 && pInfo->pTagCond != NULL) { - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aUidTagIdxs, pAPI); - } - tagScanFillResultBlock(pOperator, pRes, aUidTags, aUidTagIdxs, pAPI); - count = taosArrayGetSize(aUidTagIdxs); - - if (taosArrayGetSize(aUidTagIdxs) != 0) { + if (numTables == 0) { break; } + + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + + tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, pAPI); + count = taosArrayGetSize(aFilterIdxs); + + if (count != 0) { + break; + } + taosArrayClearEx(aUidTags, tagScanFreeUidTag); - taosArrayClear(aUidTagIdxs); + taosArrayClear(aFilterIdxs); } - taosArrayDestroy(aUidTagIdxs); + + taosArrayDestroy(aFilterIdxs); taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); + pOperator->resultInfo.totalRows += count; return (pRes->info.rows == 0) ? 
NULL : pInfo->pRes; } @@ -3012,6 +3017,9 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { static void destroyTagScanOperatorInfo(void* param) { STagScanInfo* pInfo = (STagScanInfo*)param; + if (pInfo->pCtbCursor != NULL) { + pInfo->pStorageAPI->metaFn.closeCtbCursor(pInfo->pCtbCursor, 1); + } pInfo->pRes = blockDataDestroy(pInfo->pRes); taosArrayDestroy(pInfo->matchInfo.pList); pInfo->pTableListInfo = tableListDestroy(pInfo->pTableListInfo); @@ -3043,6 +3051,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi pInfo->pTagCond = pTagCond; pInfo->pTagIndexCond = pTagIndexCond; pInfo->pTableListInfo = pTableListInfo; + pInfo->pStorageAPI = &pTaskInfo->storageAPI; pInfo->pRes = createDataBlockFromDescNode(pDescNode); pInfo->readHandle = *pReadHandle; pInfo->curPos = 0; From 1c7f854a719b590f4aa5d201d2543681d1e28975 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Fri, 11 Aug 2023 14:47:28 +0800 Subject: [PATCH 45/78] enhance: add only meta ctb index to tag scan physi node --- include/libs/nodes/plannodes.h | 14 +++- source/libs/nodes/src/nodesCloneFuncs.c | 11 ++- source/libs/nodes/src/nodesCodeFuncs.c | 70 ++++++++++++++++++- source/libs/nodes/src/nodesMsgFuncs.c | 89 +++++++++++++++++++++++++ 4 files changed, 180 insertions(+), 4 deletions(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 063318332a..0830dc4918 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -334,7 +334,19 @@ typedef struct SScanPhysiNode { bool groupOrderScan; } SScanPhysiNode; -typedef SScanPhysiNode STagScanPhysiNode; +typedef struct STagScanPhysiNode { + // SScanPhysiNode scan; //TODO? + SPhysiNode node; + SNodeList* pScanCols; + SNodeList* pScanPseudoCols; + uint64_t uid; // unique id of the table + uint64_t suid; + int8_t tableType; + SName tableName; + bool groupOrderScan; + bool onlyMetaCtbIdx; //no tbname, tag index not used. 
+} STagScanPhysiNode; + typedef SScanPhysiNode SBlockDistScanPhysiNode; typedef struct SLastRowScanPhysiNode { diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index f5eacf0bd5..965af41fa7 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -564,7 +564,16 @@ static int32_t physiScanCopy(const SScanPhysiNode* pSrc, SScanPhysiNode* pDst) { } static int32_t physiTagScanCopy(const STagScanPhysiNode* pSrc, STagScanPhysiNode* pDst) { - return physiScanCopy(pSrc, pDst); + COPY_BASE_OBJECT_FIELD(node, physiNodeCopy); + CLONE_NODE_LIST_FIELD(pScanCols); + CLONE_NODE_LIST_FIELD(pScanPseudoCols); + COPY_SCALAR_FIELD(uid); + COPY_SCALAR_FIELD(suid); + COPY_SCALAR_FIELD(tableType); + COPY_OBJECT_FIELD(tableName, sizeof(SName)); + COPY_SCALAR_FIELD(groupOrderScan); + COPY_SCALAR_FIELD(onlyMetaCtbIdx); + return TSDB_CODE_SUCCESS; } static int32_t physiTableScanCopy(const STableScanPhysiNode* pSrc, STableScanPhysiNode* pDst) { diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index f25616065e..3540f8cb70 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1562,7 +1562,7 @@ static const char* jkScanPhysiPlanTableName = "TableName"; static const char* jkScanPhysiPlanGroupOrderScan = "GroupOrderScan"; static int32_t physiScanNodeToJson(const void* pObj, SJson* pJson) { - const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + const SScanPhysiNode* pNode = (const SScanPhysiNode*)pObj; int32_t code = physicPlanNodeToJson(pObj, pJson); if (TSDB_CODE_SUCCESS == code) { @@ -1591,7 +1591,7 @@ static int32_t physiScanNodeToJson(const void* pObj, SJson* pJson) { } static int32_t jsonToPhysiScanNode(const SJson* pJson, void* pObj) { - STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + SScanPhysiNode* pNode = (SScanPhysiNode*)pObj; int32_t code = jsonToPhysicPlanNode(pJson, pObj); if 
(TSDB_CODE_SUCCESS == code) { @@ -1619,6 +1619,70 @@ static int32_t jsonToPhysiScanNode(const SJson* pJson, void* pObj) { return code; } +static const char* jkTagScanPhysiOnlyMetaCtbIdx = "OnlyMetaCtbIdx"; + +static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) { + const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + + int32_t code = physicPlanNodeToJson(pObj, pJson); + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkScanPhysiPlanScanCols, pNode->pScanCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkScanPhysiPlanScanPseudoCols, pNode->pScanPseudoCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableId, pNode->uid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanSTableId, pNode->suid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableType, pNode->tableType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkScanPhysiPlanTableName, nameToJson, &pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkScanPhysiPlanGroupOrderScan, pNode->groupOrderScan); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkTagScanPhysiOnlyMetaCtbIdx, pNode->onlyMetaCtbIdx); + } + return code; +} + +static int32_t jsonToPhysiTagScanNode(const SJson* pJson, void* pObj) { + STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + + int32_t code = jsonToPhysicPlanNode(pJson, pObj); + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkScanPhysiPlanScanCols, &pNode->pScanCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkScanPhysiPlanScanPseudoCols, &pNode->pScanPseudoCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanTableId, &pNode->uid); + } + if (TSDB_CODE_SUCCESS == code) { + code = 
tjsonGetUBigIntValue(pJson, jkScanPhysiPlanSTableId, &pNode->suid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkScanPhysiPlanTableType, &pNode->tableType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonToObject(pJson, jkScanPhysiPlanTableName, jsonToName, &pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkScanPhysiPlanGroupOrderScan, &pNode->groupOrderScan); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkTagScanPhysiOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); + } + return code; +} + static const char* jkLastRowScanPhysiPlanGroupTags = "GroupTags"; static const char* jkLastRowScanPhysiPlanGroupSort = "GroupSort"; @@ -6590,6 +6654,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { case QUERY_NODE_LOGIC_PLAN: return logicPlanToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + return physiTableScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: return physiScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: @@ -6908,6 +6973,7 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { case QUERY_NODE_LOGIC_PLAN: return jsonToLogicPlan(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + return jsonToPhysiTagScanNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: case QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN: return jsonToPhysiScanNode(pJson, pObj); diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 20e829766d..4d1120861d 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -2003,6 +2003,91 @@ static int32_t msgToPhysiScanNode(STlvDecoder* pDecoder, void* pObj) { return code; } +enum { + PHY_TAG_SCAN_CODE_BASE_NODE = 1, + PHY_TAG_SCAN_CODE_SCAN_COLS, + PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, + PHY_TAG_SCAN_CODE_BASE_UID, + PHY_TAG_SCAN_CODE_BASE_SUID, + 
PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, + PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, + PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, + PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX +}; + +static int32_t physiTagScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { + const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + + int32_t code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_BASE_NODE, physiNodeToMsg, &pNode->node); + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_COLS, nodeListToMsg, pNode->pScanCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, nodeListToMsg, pNode->pScanPseudoCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_UID, pNode->uid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_SUID, pNode->suid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeI8(pEncoder, PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, pNode->tableType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, nameToMsg, &pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, pNode->groupOrderScan); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX, pNode->onlyMetaCtbIdx); + } + return code; +} + +static int32_t msgToPhysiTagScanNode(STlvDecoder* pDecoder, void* pObj) { + STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + + int32_t code = TSDB_CODE_SUCCESS; + STlv* pTlv = NULL; + tlvForEach(pDecoder, pTlv, code) { + switch (pTlv->type) { + case PHY_TAG_SCAN_CODE_BASE_NODE: + code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node); + break; + case PHY_TAG_SCAN_CODE_SCAN_COLS: + code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanCols); + break; + case PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS: + code = 
msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanPseudoCols); + break; + case PHY_TAG_SCAN_CODE_BASE_UID: + code = tlvDecodeU64(pTlv, &pNode->uid); + break; + case PHY_TAG_SCAN_CODE_BASE_SUID: + code = tlvDecodeU64(pTlv, &pNode->suid); + break; + case PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE: + code = tlvDecodeI8(pTlv, &pNode->tableType); + break; + case PHY_TAG_SCAN_CODE_BASE_TABLE_NAME: + code = tlvDecodeObjFromTlv(pTlv, msgToName, &pNode->tableName); + break; + case PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN: + code = tlvDecodeBool(pTlv, &pNode->groupOrderScan); + break; + case PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX: + code = tlvDecodeBool(pTlv, &pNode->onlyMetaCtbIdx); + break; + default: + break; + } + } + + return code; +} + enum { PHY_LAST_ROW_SCAN_CODE_SCAN = 1, PHY_LAST_ROW_SCAN_CODE_GROUP_TAGS, @@ -3726,6 +3811,8 @@ static int32_t specificNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { code = caseWhenNodeToMsg(pObj, pEncoder); break; case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + code = physiTagScanNodeToMsg(pObj, pEncoder); + break; case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: code = physiScanNodeToMsg(pObj, pEncoder); break; @@ -3869,6 +3956,8 @@ static int32_t msgToSpecificNode(STlvDecoder* pDecoder, void* pObj) { code = msgToCaseWhenNode(pDecoder, pObj); break; case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + code = msgToPhysiTagScanNode(pDecoder, pObj); + break; case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: code = msgToPhysiScanNode(pDecoder, pObj); break; From a0c62d215d36bc980aabe3ebb12ee32521db2c43 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Fri, 11 Aug 2023 14:54:43 +0800 Subject: [PATCH 46/78] enhance: tag scan only meta ctb idx backend modification --- source/libs/executor/src/operator.c | 19 ++++++++++--------- source/libs/executor/src/scanoperator.c | 9 ++++++--- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 0fc1b77b73..d0805a86e4 100644 --- 
a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -370,17 +370,18 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR STableCountScanPhysiNode* pTblCountScanNode = (STableCountScanPhysiNode*)pPhyNode; pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { - STagScanPhysiNode* pScanPhyNode = (STagScanPhysiNode*)pPhyNode; + STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); - int32_t code = createScanTableListInfo(pScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, - pTagIndexCond, pTaskInfo); - if (code != TSDB_CODE_SUCCESS) { - pTaskInfo->code = code; - qError("failed to getTableList, code: %s", tstrerror(code)); - return NULL; + if (!pTagScanPhyNode->onlyMetaCtbIdx) { + int32_t code = createScanTableListInfo(pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, + pTagIndexCond, pTaskInfo); + if (code != TSDB_CODE_SUCCESS) { + pTaskInfo->code = code; + qError("failed to getTableList, code: %s", tstrerror(code)); + return NULL; + } } - - pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); + pOperator = createTagScanOperatorInfo(pHandle, pTagScanPhyNode, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN == type) { SBlockDistScanPhysiNode* pBlockNode = (SBlockDistScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 5e0eb71c13..107ea14914 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -3048,10 +3048,13 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi if (code != TSDB_CODE_SUCCESS) { goto _error; } + 
pInfo->pTagCond = pTagCond; pInfo->pTagIndexCond = pTagIndexCond; - pInfo->pTableListInfo = pTableListInfo; + pInfo->suid = pPhyNode->suid; pInfo->pStorageAPI = &pTaskInfo->storageAPI; + + pInfo->pTableListInfo = pTableListInfo; pInfo->pRes = createDataBlockFromDescNode(pDescNode); pInfo->readHandle = *pReadHandle; pInfo->curPos = 0; @@ -3062,10 +3065,10 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi initResultSizeInfo(&pOperator->resultInfo, 4096); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); + __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? doTagScanFromCtbIdx : doTagScan; pOperator->fpSet = - createOperatorFpSet(optrDummyOpenFn, doTagScan, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); + createOperatorFpSet(optrDummyOpenFn, tagScanNextFn, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); - pInfo->suid = pPhyNode->suid; return pOperator; _error: From 8987553d9c8cc2c86f8d575ab3bc6431f9b399eb Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Fri, 11 Aug 2023 15:56:38 +0800 Subject: [PATCH 47/78] fix: remove ins_modules --- include/libs/nodes/nodes.h | 2 +- source/common/src/systable.c | 2 +- source/dnode/mnode/impl/src/mndShow.c | 2 ++ source/libs/nodes/src/nodesCodeFuncs.c | 2 ++ source/libs/nodes/src/nodesUtilFuncs.c | 4 ++-- source/libs/parser/src/parAstParser.c | 2 ++ source/libs/parser/src/parAuthenticator.c | 2 +- source/libs/parser/src/parTranslater.c | 4 +++- 8 files changed, 14 insertions(+), 6 deletions(-) diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 2319643b09..8eeeff4148 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -169,7 +169,7 @@ typedef enum ENodeType { QUERY_NODE_REVOKE_STMT, QUERY_NODE_SHOW_DNODES_STMT, QUERY_NODE_SHOW_MNODES_STMT, - QUERY_NODE_SHOW_MODULES_STMT, +// QUERY_NODE_SHOW_MODULES_STMT, QUERY_NODE_SHOW_QNODES_STMT, QUERY_NODE_SHOW_SNODES_STMT, QUERY_NODE_SHOW_BNODES_STMT, diff --git 
a/source/common/src/systable.c b/source/common/src/systable.c index 0940fcef6a..eb8042099f 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -314,7 +314,7 @@ static const SSysDbTableSchema userUserPrivilegesSchema[] = { static const SSysTableMeta infosMeta[] = { {TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true}, {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true}, - {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true}, + // {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true}, {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema), true}, {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema), true}, {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema), true}, diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index 44f4751700..7d842be7de 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -58,8 +58,10 @@ static int32_t convertToRetrieveType(char *name, int32_t len) { type = TSDB_MGMT_TABLE_DNODE; } else if (strncasecmp(name, TSDB_INS_TABLE_MNODES, len) == 0) { type = TSDB_MGMT_TABLE_MNODE; +/* } else if (strncasecmp(name, TSDB_INS_TABLE_MODULES, len) == 0) { type = TSDB_MGMT_TABLE_MODULE; +*/ } else if (strncasecmp(name, TSDB_INS_TABLE_QNODES, len) == 0) { type = TSDB_MGMT_TABLE_QNODE; } else if (strncasecmp(name, TSDB_INS_TABLE_SNODES, len) == 0) { diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index f25616065e..263d1b21ab 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -197,8 +197,10 @@ const char* nodesNodeName(ENodeType type) { return "ShowDnodesStmt"; case QUERY_NODE_SHOW_MNODES_STMT: return "ShowMnodesStmt"; +/* case QUERY_NODE_SHOW_MODULES_STMT: return "ShowModulesStmt"; +*/ case QUERY_NODE_SHOW_QNODES_STMT: return "ShowQnodesStmt"; case 
QUERY_NODE_SHOW_SNODES_STMT: diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index c8197721fb..75b63b9d34 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -406,7 +406,7 @@ SNode* nodesMakeNode(ENodeType type) { return makeNode(type, sizeof(SRevokeStmt)); case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: +// case QUERY_NODE_SHOW_MODULES_STMT: case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_SNODES_STMT: case QUERY_NODE_SHOW_BNODES_STMT: @@ -982,7 +982,7 @@ void nodesDestroyNode(SNode* pNode) { break; case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: +// case QUERY_NODE_SHOW_MODULES_STMT: case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_SNODES_STMT: case QUERY_NODE_SHOW_BNODES_STMT: diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index fdec9cba79..86b4566d37 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -690,8 +690,10 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { return collectMetaKeyFromShowDnodes(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_MNODES_STMT: return collectMetaKeyFromShowMnodes(pCxt, (SShowStmt*)pStmt); +/* case QUERY_NODE_SHOW_MODULES_STMT: return collectMetaKeyFromShowModules(pCxt, (SShowStmt*)pStmt); +*/ case QUERY_NODE_SHOW_QNODES_STMT: return collectMetaKeyFromShowQnodes(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_SNODES_STMT: diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c index 9b2ac662c8..6a26dcfa8b 100644 --- a/source/libs/parser/src/parAuthenticator.c +++ b/source/libs/parser/src/parAuthenticator.c @@ -263,7 +263,7 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) { return authAlterTable(pCxt, (SAlterTableStmt*)pStmt); case 
QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: +// case QUERY_NODE_SHOW_MODULES_STMT: case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_SNODES_STMT: case QUERY_NODE_SHOW_BNODES_STMT: diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 38118c03f8..a41447edf3 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -92,6 +92,7 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { .numOfShowCols = 1, .pShowCols = {"*"} }, +/* { .showType = QUERY_NODE_SHOW_MODULES_STMT, .pDbName = TSDB_INFORMATION_SCHEMA_DB, @@ -99,6 +100,7 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { .numOfShowCols = 1, .pShowCols = {"*"} }, +*/ { .showType = QUERY_NODE_SHOW_QNODES_STMT, .pDbName = TSDB_INFORMATION_SCHEMA_DB, @@ -9164,7 +9166,7 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) { case QUERY_NODE_SHOW_USERS_STMT: case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: +// case QUERY_NODE_SHOW_MODULES_STMT: case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_FUNCTIONS_STMT: case QUERY_NODE_SHOW_INDEXES_STMT: From 84452c8deefc8ca600dcefdff907f6bc4608edcc Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 16:36:53 +0800 Subject: [PATCH 48/78] cos: use static sdk --- cmake/curl_CMakeLists.txt.in | 2 +- source/dnode/vnode/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 5f1efc1e5a..856d42257a 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 
--without-ssl --enable-shared=no --disable-ldap --disable-ldaps #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 684134c2d6..a07e38e53b 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -162,7 +162,7 @@ target_link_libraries( PUBLIC index # s3 - cos_c_sdk + cos_c_sdk_static ${APR_UTIL_LIBRARY} ${APR_LIBRARY} ${MINIXML_LIBRARY} From 6530658815346945aaa333326ab72f54067182c4 Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 11 Aug 2023 17:05:59 +0800 Subject: [PATCH 49/78] fix: continue coding --- source/libs/executor/src/operator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index d0805a86e4..7f0c5baa36 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -373,7 +373,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { - int32_t code = createScanTableListInfo(pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, + int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); if (code != TSDB_CODE_SUCCESS) { pTaskInfo->code = code; From e1971cf0a023e1f8e155a27e0fc1e92e2c56b415 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 17:18:38 +0800 Subject: [PATCH 50/78] curl: disable brotli with static lib --- cmake/curl_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 856d42257a..0fe0c2256f 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ 
-9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install From 7b22d37f123234d1c1fbed53cb1be45d72922635 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 11 Aug 2023 17:25:43 +0800 Subject: [PATCH 51/78] fix:lost data when consumer db --- include/libs/wal/wal.h | 7 +- source/dnode/vnode/src/inc/tq.h | 2 +- source/dnode/vnode/src/tq/tqRead.c | 44 ++++----- source/dnode/vnode/src/tq/tqUtil.c | 12 +-- source/libs/wal/src/walRead.c | 142 ++++------------------------- 5 files changed, 39 insertions(+), 168 deletions(-) diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index b19a0d783d..1f7323a06a 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -153,7 +153,6 @@ struct SWalReader { int64_t capacity; TdThreadMutex mutex; SWalFilterCond cond; - // TODO remove it SWalCkHead *pHead; }; @@ -208,9 +207,9 @@ void walReaderVerifyOffset(SWalReader *pWalReader, STqOffsetVal* pOffset) // only for tq usage void walSetReaderCapacity(SWalReader *pRead, int32_t capacity); -int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead); -int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead); -int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead); +int32_t walFetchHead(SWalReader *pRead, int64_t ver); +int32_t walFetchBody(SWalReader *pRead); +int32_t walSkipFetchBody(SWalReader *pRead); void walRefFirstVer(SWal *, SWalRef *); void walRefLastVer(SWal *, SWalRef *); diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index a6a84075b5..f08c308185 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ 
b/source/dnode/vnode/src/inc/tq.h @@ -127,7 +127,7 @@ void tqDestroyTqHandle(void* data); // tqRead int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* offset); int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset); -int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum, uint64_t reqId); +int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t reqId); // tqExec int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxRsp* pRsp, int32_t* totalRows); diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 9b8f1781cb..6c091fa4cb 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -184,50 +184,42 @@ end: return tbSuid == realTbSuid; } -int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** ppCkHead, uint64_t reqId) { - int32_t code = 0; +int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t reqId) { + int32_t code = -1; int32_t vgId = TD_VID(pTq->pVnode); - taosThreadMutexLock(&pHandle->pWalReader->mutex); int64_t offset = *fetchOffset; + int64_t lastVer = walGetLastVer(pHandle->pWalReader->pWal); + int64_t committedVer = walGetCommittedVer(pHandle->pWalReader->pWal); + int64_t appliedVer = walGetAppliedVer(pHandle->pWalReader->pWal); - while (1) { - if (walFetchHead(pHandle->pWalReader, offset, *ppCkHead) < 0) { + wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last index:%" PRId64 " commit index:%" PRId64 ", applied index:%" PRId64, + vgId, offset, lastVer, committedVer, appliedVer); + + while (offset <= appliedVer) { + if (walFetchHead(pHandle->pWalReader, offset) < 0) { tqDebug("tmq poll: consumer:0x%" PRIx64 ", (epoch %d) vgId:%d offset %" PRId64 ", no more log to return, reqId:0x%" PRIx64, pHandle->consumerId, pHandle->epoch, vgId, offset, 
reqId); - *fetchOffset = offset; - code = -1; goto END; } tqDebug("vgId:%d, consumer:0x%" PRIx64 " taosx get msg ver %" PRId64 ", type: %s, reqId:0x%" PRIx64, vgId, - pHandle->consumerId, offset, TMSG_INFO((*ppCkHead)->head.msgType), reqId); + pHandle->consumerId, offset, TMSG_INFO(pHandle->pWalReader->pHead->head.msgType), reqId); - if ((*ppCkHead)->head.msgType == TDMT_VND_SUBMIT) { - code = walFetchBody(pHandle->pWalReader, ppCkHead); - - if (code < 0) { - *fetchOffset = offset; - code = -1; - goto END; - } - *fetchOffset = offset; - code = 0; + if (pHandle->pWalReader->pHead->head.msgType == TDMT_VND_SUBMIT) { + code = walFetchBody(pHandle->pWalReader); goto END; } else { if (pHandle->fetchMeta != WITH_DATA) { - SWalCont* pHead = &((*ppCkHead)->head); + SWalCont* pHead = &(pHandle->pWalReader->pHead->head); if (IS_META_MSG(pHead->msgType) && !(pHead->msgType == TDMT_VND_DELETE && pHandle->fetchMeta == ONLY_META)) { - code = walFetchBody(pHandle->pWalReader, ppCkHead); + code = walFetchBody(pHandle->pWalReader); if (code < 0) { - *fetchOffset = offset; - code = -1; goto END; } if (isValValidForTable(pHandle, pHead)) { - *fetchOffset = offset; code = 0; goto END; } else { @@ -236,10 +228,8 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea } } } - code = walSkipFetchBody(pHandle->pWalReader, *ppCkHead); + code = walSkipFetchBody(pHandle->pWalReader); if (code < 0) { - *fetchOffset = offset; - code = -1; goto END; } offset++; @@ -247,7 +237,7 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea } END: - taosThreadMutexUnlock(&pHandle->pWalReader->mutex); + *fetchOffset = offset; return code; } diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index 5cbca6e0f2..42aac52c63 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -179,7 +179,6 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, SRpcMsg* 
pMsg, STqOffsetVal* offset) { int code = 0; int32_t vgId = TD_VID(pTq->pVnode); - SWalCkHead* pCkHead = NULL; SMqMetaRsp metaRsp = {0}; STaosxRsp taosxRsp = {0}; tqInitTaosxRsp(&taosxRsp, *offset); @@ -216,12 +215,6 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, if (offset->type == TMQ_OFFSET__LOG) { walReaderVerifyOffset(pHandle->pWalReader, offset); int64_t fetchVer = offset->version; - pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048); - if (pCkHead == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - code = -1; - goto end; - } walSetReaderCapacity(pHandle->pWalReader, 2048); int totalRows = 0; @@ -234,14 +227,14 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, break; } - if (tqFetchLog(pTq, pHandle, &fetchVer, &pCkHead, pRequest->reqId) < 0) { + if (tqFetchLog(pTq, pHandle, &fetchVer, pRequest->reqId) < 0) { tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer); // setRequestVersion(&taosxRsp.reqOffset, offset->version); code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId); goto end; } - SWalCont* pHead = &pCkHead->head; + SWalCont* pHead = &pHandle->pWalReader->pHead->head; tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d) iter log, vgId:%d offset %" PRId64 " msgType %d", pRequest->consumerId, pRequest->epoch, vgId, fetchVer, pHead->msgType); @@ -291,7 +284,6 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, end: tDeleteSTaosxRsp(&taosxRsp); - taosMemoryFreeClear(pCkHead); return code; } diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 54b9576eb1..01404494e3 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -16,10 +16,6 @@ #include "taoserror.h" #include "walInt.h" -static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer); -static int32_t walFetchBodyNew(SWalReader *pRead); -static int32_t walSkipFetchBodyNew(SWalReader *pRead); - 
SWalReader *walOpenReader(SWal *pWal, SWalFilterCond *cond) { SWalReader *pReader = taosMemoryCalloc(1, sizeof(SWalReader)); if (pReader == NULL) { @@ -80,19 +76,19 @@ int32_t walNextValidMsg(SWalReader *pReader) { return -1; } while (fetchVer <= appliedVer) { - if (walFetchHeadNew(pReader, fetchVer) < 0) { + if (walFetchHead(pReader, fetchVer) < 0) { return -1; } int32_t type = pReader->pHead->head.msgType; if (type == TDMT_VND_SUBMIT || ((type == TDMT_VND_DELETE) && (pReader->cond.deleteMsg == 1)) || (IS_META_MSG(type) && pReader->cond.scanMeta)) { - if (walFetchBodyNew(pReader) < 0) { + if (walFetchBody(pReader) < 0) { return -1; } return 0; } else { - if (walSkipFetchBodyNew(pReader) < 0) { + if (walSkipFetchBody(pReader) < 0) { return -1; } @@ -256,102 +252,7 @@ int32_t walReaderSeekVer(SWalReader *pReader, int64_t ver) { void walSetReaderCapacity(SWalReader *pRead, int32_t capacity) { pRead->capacity = capacity; } -static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer) { - int64_t contLen; - bool seeked = false; - - wDebug("vgId:%d, wal starts to fetch head, index:%" PRId64, pRead->pWal->cfg.vgId, fetchVer); - - if (pRead->curVersion != fetchVer) { - if (walReaderSeekVer(pRead, fetchVer) < 0) { - return -1; - } - seeked = true; - } - - while (1) { - contLen = taosReadFile(pRead->pLogFile, pRead->pHead, sizeof(SWalCkHead)); - if (contLen == sizeof(SWalCkHead)) { - break; - } else if (contLen == 0 && !seeked) { - if(walReadSeekVerImpl(pRead, fetchVer) < 0){ - return -1; - } - seeked = true; - continue; - } else { - if (contLen < 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - } else { - terrno = TSDB_CODE_WAL_FILE_CORRUPTED; - } - return -1; - } - } -// pRead->curInvalid = 0; - return 0; -} - -static int32_t walFetchBodyNew(SWalReader *pReader) { - SWalCont *pReadHead = &pReader->pHead->head; - int64_t ver = pReadHead->version; - - wDebug("vgId:%d, wal starts to fetch body, ver:%" PRId64 " ,len:%d, total", pReader->pWal->cfg.vgId, ver, - 
pReadHead->bodyLen); - - if (pReader->capacity < pReadHead->bodyLen) { - SWalCkHead *ptr = (SWalCkHead *)taosMemoryRealloc(pReader->pHead, sizeof(SWalCkHead) + pReadHead->bodyLen); - if (ptr == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } - - pReader->pHead = ptr; - pReadHead = &pReader->pHead->head; - pReader->capacity = pReadHead->bodyLen; - } - - if (pReadHead->bodyLen != taosReadFile(pReader->pLogFile, pReadHead->body, pReadHead->bodyLen)) { - if (pReadHead->bodyLen < 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since %s", - pReader->pWal->cfg.vgId, pReader->pHead->head.version, ver, tstrerror(terrno)); - } else { - wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since file corrupted", - pReader->pWal->cfg.vgId, pReader->pHead->head.version, ver); - terrno = TSDB_CODE_WAL_FILE_CORRUPTED; - } - return -1; - } - - if (walValidBodyCksum(pReader->pHead) != 0) { - wError("vgId:%d, wal fetch body error:%" PRId64 ", since body checksum not passed", pReader->pWal->cfg.vgId, ver); - terrno = TSDB_CODE_WAL_FILE_CORRUPTED; - return -1; - } - - wDebug("vgId:%d, index:%" PRId64 " is fetched, type:%d, cursor advance", pReader->pWal->cfg.vgId, ver, pReader->pHead->head.msgType); - pReader->curVersion = ver + 1; - return 0; -} - -static int32_t walSkipFetchBodyNew(SWalReader *pRead) { - int64_t code; - - code = taosLSeekFile(pRead->pLogFile, pRead->pHead->head.bodyLen, SEEK_CUR); - if (code < 0) { - terrno = TAOS_SYSTEM_ERROR(errno); -// pRead->curInvalid = 1; - return -1; - } - - pRead->curVersion++; - wDebug("vgId:%d, version advance to %" PRId64 ", skip fetch", pRead->pWal->cfg.vgId, pRead->curVersion); - - return 0; -} - -int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { +int32_t walFetchHead(SWalReader *pRead, int64_t ver) { int64_t code; int64_t contLen; bool seeked = false; @@ -369,15 +270,13 @@ int32_t 
walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { if (pRead->curVersion != ver) { code = walReaderSeekVer(pRead, ver); if (code < 0) { -// pRead->curVersion = ver; -// pRead->curInvalid = 1; return -1; } seeked = true; } while (1) { - contLen = taosReadFile(pRead->pLogFile, pHead, sizeof(SWalCkHead)); + contLen = taosReadFile(pRead->pLogFile, pRead->pHead, sizeof(SWalCkHead)); if (contLen == sizeof(SWalCkHead)) { break; } else if (contLen == 0 && !seeked) { @@ -392,12 +291,11 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { } else { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } -// pRead->curInvalid = 1; return -1; } } - code = walValidHeadCksum(pHead); + code = walValidHeadCksum(pRead->pHead); if (code != 0) { wError("vgId:%d, unexpected wal log index:%" PRId64 ", since head checksum not passed", pRead->pWal->cfg.vgId, ver); @@ -405,32 +303,27 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { return -1; } -// pRead->curInvalid = 0; return 0; } -int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead) { - int64_t code; - +int32_t walSkipFetchBody(SWalReader *pRead) { wDebug("vgId:%d, skip fetch body %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64 ", applied ver:%" PRId64, - pRead->pWal->cfg.vgId, pHead->head.version, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, + pRead->pWal->cfg.vgId, pRead->pHead->head.version, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, pRead->pWal->vers.lastVer, pRead->pWal->vers.appliedVer); - code = taosLSeekFile(pRead->pLogFile, pHead->head.bodyLen, SEEK_CUR); + int64_t code = taosLSeekFile(pRead->pLogFile, pRead->pHead->head.bodyLen, SEEK_CUR); if (code < 0) { terrno = TAOS_SYSTEM_ERROR(errno); -// pRead->curInvalid = 1; return -1; } pRead->curVersion++; - return 0; } -int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { - SWalCont *pReadHead = &((*ppHead)->head); +int32_t walFetchBody(SWalReader 
*pRead) { + SWalCont *pReadHead = &pRead->pHead->head; int64_t ver = pReadHead->version; wDebug("vgId:%d, fetch body %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64 @@ -439,13 +332,13 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { pRead->pWal->vers.appliedVer); if (pRead->capacity < pReadHead->bodyLen) { - SWalCkHead *ptr = (SWalCkHead *)taosMemoryRealloc(*ppHead, sizeof(SWalCkHead) + pReadHead->bodyLen); + SWalCkHead *ptr = (SWalCkHead *)taosMemoryRealloc(pRead->pHead, sizeof(SWalCkHead) + pReadHead->bodyLen); if (ptr == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } - *ppHead = ptr; - pReadHead = &((*ppHead)->head); + pRead->pHead = ptr; + pReadHead = &pRead->pHead->head; pRead->capacity = pReadHead->bodyLen; } @@ -459,27 +352,24 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { pRead->pWal->cfg.vgId, pReadHead->version, ver); terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } -// pRead->curInvalid = 1; return -1; } if (pReadHead->version != ver) { wError("vgId:%d, wal fetch body error, index:%" PRId64 ", read request index:%" PRId64, pRead->pWal->cfg.vgId, pReadHead->version, ver); -// pRead->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; return -1; } - if (walValidBodyCksum(*ppHead) != 0) { + if (walValidBodyCksum(pRead->pHead) != 0) { wError("vgId:%d, wal fetch body error, index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId, ver); -// pRead->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; return -1; } - pRead->curVersion = ver + 1; + pRead->curVersion++; return 0; } From 47d2f9ad6df4385e0cb912841204e94cff1d4ed0 Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 11 Aug 2023 17:52:52 +0800 Subject: [PATCH 52/78] fix: first run without tag cond --- source/dnode/vnode/src/meta/metaQuery.c | 4 ++-- source/dnode/vnode/src/vnd/vnodeQuery.c | 6 +++--- source/libs/executor/src/operator.c | 1 + source/libs/executor/src/scanoperator.c | 15 +++++++++++---- 4 files changed, 
17 insertions(+), 9 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 31c7bc8500..39c3dfa080 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -427,7 +427,7 @@ SMCtbCursor *metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock) { metaRLock(pMeta); } - ret = tdbTbcOpen(pMeta->pCtbIdx, &pCtbCur->pCur, NULL); + ret = tdbTbcOpen(pMeta->pCtbIdx, (TBC**)&pCtbCur->pCur, NULL); if (ret < 0) { metaULock(pMeta); taosMemoryFree(pCtbCur); @@ -1365,7 +1365,7 @@ int32_t metaGetTableTagsByUids(void *pVnode, int64_t suid, SArray *uidList) { } int32_t metaGetTableTags(void *pVnode, uint64_t suid, SArray *pUidTagInfo) { - SMCtbCursor *pCur = metaOpenCtbCursor(((SVnode *)pVnode)->pMeta, suid, 1); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, suid, 1); // If len > 0 means there already have uids, and we only want the // tags of the specified tables, of which uid in the uid list. Otherwise, all table tags are retrieved and kept diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index 51f4cee40c..48f8ec021d 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -440,7 +440,7 @@ int32_t vnodeGetTableList(void* pVnode, int8_t type, SArray* pList) { } int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list) { - SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, uid, 1); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, uid, 1); while (1) { tb_uid_t id = metaCtbCursorNext(pCur); @@ -462,7 +462,7 @@ int32_t vnodeGetCtbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bo int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list) { SVnode *pVnodeObj = pVnode; - SMCtbCursor *pCur = metaOpenCtbCursor(pVnodeObj->pMeta, suid, 1); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnodeObj, suid, 1); while (1) { tb_uid_t id = metaCtbCursorNext(pCur); @@ -521,7 +521,7 
@@ int32_t vnodeGetStbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bo } int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) { - SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, suid, 0); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, suid, 0); if (!pCur) { return TSDB_CODE_FAILED; } diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 7f0c5baa36..31998d13b6 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -371,6 +371,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; + pTagScanPhyNode->onlyMetaCtbIdx = true; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 107ea14914..5f8bf03d80 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2922,7 +2922,8 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { SArray* aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); while (1) { - while (count < pOperator->resultInfo.capacity) { + int32_t numTables = 0; + while (numTables < pOperator->resultInfo.capacity) { SMCtbCursor* pCur = pInfo->pCtbCursor; tb_uid_t uid = pAPI->metaFn.ctbCursorNext(pInfo->pCtbCursor); if (uid == 0) { @@ -2932,14 +2933,19 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { info.pTagVal = taosMemoryMalloc(pCur->vLen); memcpy(info.pTagVal, pCur->pVal, pCur->vLen); taosArrayPush(aUidTags, &info); + ++numTables; } - int32_t numTables 
= taosArrayGetSize(aUidTags); if (numTables == 0) { break; } - - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + if (pInfo->pTagCond != NULL) { + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + } else { + for (int i = 0; i < numTables; ++i) { + taosArrayPush(aFilterIdxs, &i); + } + } tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, pAPI); count = taosArrayGetSize(aFilterIdxs); @@ -2955,6 +2961,7 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { taosArrayDestroy(aFilterIdxs); taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); + pRes->info.rows = count; pOperator->resultInfo.totalRows += count; return (pRes->info.rows == 0) ? NULL : pInfo->pRes; } From 767563752a033eb0cc18f508eb75c802204189d4 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 11 Aug 2023 20:21:13 +0800 Subject: [PATCH 53/78] opti:wal logic --- source/dnode/vnode/src/tq/tqRead.c | 2 ++ source/dnode/vnode/src/tq/tqUtil.c | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 6c091fa4cb..e74146bab2 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -224,6 +224,7 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t goto END; } else { offset++; + code = -1; continue; } } @@ -234,6 +235,7 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t } offset++; } + code = -1; } END: diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index 42aac52c63..b7fd505784 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -216,7 +216,6 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, walReaderVerifyOffset(pHandle->pWalReader, offset); int64_t fetchVer = offset->version; - 
walSetReaderCapacity(pHandle->pWalReader, 2048); int totalRows = 0; while (1) { int32_t savedEpoch = atomic_load_32(&pHandle->epoch); From f1baed79b4eb6f136b814204fff906ac0b5969cf Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 11 Aug 2023 20:22:51 +0800 Subject: [PATCH 54/78] opti:rm useless code --- include/libs/wal/wal.h | 1 - source/libs/wal/src/walRead.c | 1 - 2 files changed, 2 deletions(-) diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index 1f7323a06a..cfe70a186c 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -206,7 +206,6 @@ void walReaderValidVersionRange(SWalReader *pReader, int64_t *sver, int64 void walReaderVerifyOffset(SWalReader *pWalReader, STqOffsetVal* pOffset); // only for tq usage -void walSetReaderCapacity(SWalReader *pRead, int32_t capacity); int32_t walFetchHead(SWalReader *pRead, int64_t ver); int32_t walFetchBody(SWalReader *pRead); int32_t walSkipFetchBody(SWalReader *pRead); diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 01404494e3..d9e43e4324 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -250,7 +250,6 @@ int32_t walReaderSeekVer(SWalReader *pReader, int64_t ver) { return 0; } -void walSetReaderCapacity(SWalReader *pRead, int32_t capacity) { pRead->capacity = capacity; } int32_t walFetchHead(SWalReader *pRead, int64_t ver) { int64_t code; From edd2fa4f351c3c3434c2143822b0a188a1c305d1 Mon Sep 17 00:00:00 2001 From: slzhou Date: Sat, 12 Aug 2023 08:17:43 +0800 Subject: [PATCH 55/78] fix: pass compilation and simple test --- source/libs/executor/src/scanoperator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 5f8bf03d80..ac20bae167 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2940,7 +2940,7 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { break; } 
if (pInfo->pTagCond != NULL) { - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI); } else { for (int i = 0; i < numTables; ++i) { taosArrayPush(aFilterIdxs, &i); From f83bfec067deb4de1ab9e98434094f0d0e20a8cf Mon Sep 17 00:00:00 2001 From: slzhou Date: Sat, 12 Aug 2023 08:28:25 +0800 Subject: [PATCH 56/78] fix: change only meta ctb idx back to false --- source/libs/executor/src/operator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 31998d13b6..abef8298e5 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -371,7 +371,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; - pTagScanPhyNode->onlyMetaCtbIdx = true; + pTagScanPhyNode->onlyMetaCtbIdx = false; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, From efb472d54af8a8046245757c522ff91f3c3339d0 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Sat, 12 Aug 2023 10:15:40 +0800 Subject: [PATCH 57/78] fix:heap use after free --- source/dnode/vnode/src/tq/tqRead.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index e74146bab2..252a0642fa 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -219,6 +219,7 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t goto END; } + pHead = 
&(pHandle->pWalReader->pHead->head); if (isValValidForTable(pHandle, pHead)) { code = 0; goto END; From c4bd52d434c2e12f59d869857083775903d97ce3 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Sun, 13 Aug 2023 18:14:22 +0800 Subject: [PATCH 58/78] fix:disable test case 5dnode3mnodeRoll --- tests/parallel_test/cases.task | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index c8b8ce87fd..a946a7feaf 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -456,7 +456,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3 ,,n,system-test,python3 ./test.py -f 6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py -N 6 -M 3 -,,n,system-test,python ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 +#,,n,system-test,python ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 From 6688d70ba41400c83e9686e0ca6565dc861b1327 Mon Sep 17 00:00:00 2001 From: slzhou Date: Sun, 13 Aug 2023 18:46:55 +0800 Subject: [PATCH 59/78] fix: fix planner test error --- source/libs/nodes/src/nodesCodeFuncs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 3540f8cb70..a2de0bc63a 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -6654,7 +6654,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { case QUERY_NODE_LOGIC_PLAN: return 
logicPlanToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: - return physiTableScanNodeToJson(pObj, pJson); + return physiTagScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: return physiScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: From 0aa35987ab91f1e41a8799eaee2570817dc270e6 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 14 Aug 2023 09:45:04 +0800 Subject: [PATCH 60/78] Update 5dnode3mnodeRoll.py --- tests/system-test/6-cluster/5dnode3mnodeRoll.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/6-cluster/5dnode3mnodeRoll.py b/tests/system-test/6-cluster/5dnode3mnodeRoll.py index 38ac47f777..9d62eb3b4b 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRoll.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRoll.py @@ -27,7 +27,7 @@ import threading import time import json -BASEVERSION = "3.1.0.0" +BASEVERSION = "3.1.1.0" class TDTestCase: From c97b9249fc4e08667648d28ab577828ff8640dd7 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 14 Aug 2023 14:38:28 +0800 Subject: [PATCH 61/78] cos: only for linux --- cmake/cmake.options | 4 ++ contrib/CMakeLists.txt | 9 +++-- source/common/src/tglobal.c | 2 + source/dnode/vnode/CMakeLists.txt | 56 +++++++++++++++------------ source/dnode/vnode/src/vnd/vnodeCos.c | 14 +++++++ 5 files changed, 57 insertions(+), 28 deletions(-) diff --git a/cmake/cmake.options b/cmake/cmake.options index ea5efcb13a..1d4e9ba515 100644 --- a/cmake/cmake.options +++ b/cmake/cmake.options @@ -125,12 +125,16 @@ option( ON ) +IF(${TD_LINUX}) + option( BUILD_WITH_COS "If build with cos" ON ) +ENDIF () + option( BUILD_WITH_SQLITE "If build with sqlite" diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 452192a288..e3e48ac3a1 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -6,6 +6,8 @@ function(cat IN_FILE OUT_FILE) file(APPEND ${OUT_FILE} "${CONTENTS}") endfunction(cat IN_FILE OUT_FILE) +if(${TD_LINUX}) + 
set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) @@ -35,6 +37,8 @@ execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . execute_process(COMMAND "${CMAKE_COMMAND}" --build . WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") +endif(${TD_LINUX}) + set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -388,7 +392,7 @@ endif() # cos if(${BUILD_WITH_COS}) - if(NOT ${TD_WINDOWS}) + if(${TD_LINUX}) set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) @@ -406,10 +410,9 @@ if(${BUILD_WITH_COS}) ) set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME}) - else() - endif(NOT ${TD_WINDOWS}) + endif(${TD_LINUX}) endif(${BUILD_WITH_COS}) # lucene diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 2447e02698..3595347db3 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -292,7 +292,9 @@ int32_t taosSetS3Cfg(SConfig *pCfg) { } } if (tsS3BucketName[0] != '<' && tsDiskCfgNum > 1) { +#ifdef USE_COS tsS3Enabled = true; +#endif } return 0; diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index a07e38e53b..052b6be37f 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,12 +135,6 @@ else() endif() endif() -set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") -find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) -find_library(MINIXML_LIBRARY mxml) -find_library(CURL_LIBRARY curl) - target_link_libraries( vnode PUBLIC os @@ -160,28 +154,24 @@ target_link_libraries( PUBLIC transport PUBLIC stream PUBLIC index - - # s3 - cos_c_sdk_static - ${APR_UTIL_LIBRARY} - ${APR_LIBRARY} - 
${MINIXML_LIBRARY} - ${CURL_LIBRARY} ) -IF (TD_GRANT) - TARGET_LINK_LIBRARIES(vnode PUBLIC grant) -ENDIF () +if(${TD_LINUX}) +set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) +target_link_libraries( + vnode -target_compile_definitions(vnode PUBLIC -DMETA_REFACT) - -if(${BUILD_WITH_INVERTEDINDEX}) - add_definitions(-DUSE_INVERTED_INDEX) -endif(${BUILD_WITH_INVERTEDINDEX}) - -if(${BUILD_WITH_ROCKSDB}) - add_definitions(-DUSE_ROCKSDB) -endif(${BUILD_WITH_ROCKSDB}) + # s3 + PUBLIC cos_c_sdk_static + PUBLIC ${APR_UTIL_LIBRARY} + PUBLIC ${APR_LIBRARY} + PUBLIC ${MINIXML_LIBRARY} + PUBLIC ${CURL_LIBRARY} +) # s3 FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) @@ -199,6 +189,22 @@ target_include_directories( PUBLIC "$ENV{HOME}/.cos-local.1/include" ) +endif(${TD_LINUX}) + +IF (TD_GRANT) + TARGET_LINK_LIBRARIES(vnode PUBLIC grant) +ENDIF () + +target_compile_definitions(vnode PUBLIC -DMETA_REFACT) + +if(${BUILD_WITH_INVERTEDINDEX}) + add_definitions(-DUSE_INVERTED_INDEX) +endif(${BUILD_WITH_INVERTEDINDEX}) + +if(${BUILD_WITH_ROCKSDB}) + add_definitions(-DUSE_ROCKSDB) +endif(${BUILD_WITH_ROCKSDB}) + if(${BUILD_TEST}) add_subdirectory(test) endif(${BUILD_TEST}) diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index a40e046972..52d5fc2b1b 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -12,6 +12,7 @@ extern char tsS3AccessKeySecret[]; extern char tsS3BucketName[]; extern char tsS3AppId[]; +#ifdef USE_COS int32_t s3Init() { if (cos_http_io_initialize(NULL, 0) != COSE_OK) { return -1; @@ -294,3 +295,16 @@ long s3Size(const char *object_name) { return size; } + +#else + +int32_t s3Init() { return 0; } +void s3CleanUp() {} +void 
s3PutObjectFromFile(const char *file, const char *object) {} +void s3DeleteObjects(const char *object_name[], int nobject) {} +bool s3Exists(const char *object_name) { return false; } +bool s3Get(const char *object_name, const char *path) { return false; } +void s3EvictCache(const char *path, long object_size) {} +long s3Size(const char *object_name) { return 0; } + +#endif From 989abc2bf6264ea6c2837c3daa44027581405807 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 14 Aug 2023 15:03:17 +0800 Subject: [PATCH 62/78] vnode/cos: move includes into USE_COS --- source/dnode/vnode/src/vnd/vnodeCos.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index 52d5fc2b1b..b28b7ad747 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -2,10 +2,6 @@ #include "vndCos.h" -#include "cos_api.h" -#include "cos_http_io.h" -#include "cos_log.h" - extern char tsS3Endpoint[]; extern char tsS3AccessKeyId[]; extern char tsS3AccessKeySecret[]; @@ -13,6 +9,10 @@ extern char tsS3BucketName[]; extern char tsS3AppId[]; #ifdef USE_COS +#include "cos_api.h" +#include "cos_http_io.h" +#include "cos_log.h" + int32_t s3Init() { if (cos_http_io_initialize(NULL, 0) != COSE_OK) { return -1; From 26951294cfa76ef77a1b185346a9cf6637982967 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 14 Aug 2023 15:10:18 +0800 Subject: [PATCH 63/78] Update cases.task --- tests/parallel_test/cases.task | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index a946a7feaf..0a65340976 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -456,7 +456,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 
6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3 ,,n,system-test,python3 ./test.py -f 6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py -N 6 -M 3 -#,,n,system-test,python ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 +,,n,system-test,python3 ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 From 5c276fa547c0b5ba9a7a7332968ecc89f2b51105 Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Fri, 11 Aug 2023 17:15:17 +0800 Subject: [PATCH 64/78] fix: make kill query work for sysscanoperator --- source/libs/executor/inc/querytask.h | 2 +- source/libs/executor/inc/tsort.h | 3 ++- source/libs/executor/src/querytask.c | 2 +- source/libs/executor/src/scanoperator.c | 5 +++-- source/libs/executor/src/sysscanoperator.c | 5 +++++ source/libs/executor/src/tsort.c | 13 ++++++++++--- 6 files changed, 22 insertions(+), 8 deletions(-) diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h index 7241b015a0..0742b9ba4c 100644 --- a/source/libs/executor/inc/querytask.h +++ b/source/libs/executor/inc/querytask.h @@ -99,7 +99,7 @@ struct SExecTaskInfo { void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst); SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model, SStorageAPI* pAPI); void doDestroyTask(SExecTaskInfo* pTaskInfo); -bool isTaskKilled(SExecTaskInfo* pTaskInfo); +bool isTaskKilled(void* pTaskInfo); void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode); void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status); int32_t createExecTaskInfo(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId, diff --git 
a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h index 57c8bce275..3180173ca7 100644 --- a/source/libs/executor/inc/tsort.h +++ b/source/libs/executor/inc/tsort.h @@ -191,7 +191,8 @@ int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols); bool tsortIsClosed(SSortHandle* pHandle); void tsortSetClosed(SSortHandle* pHandle); -void setSingleTableMerge(SSortHandle* pHandle); +void tsortSetSingleTableMerge(SSortHandle* pHandle); +void tsortSetAbortCheckFn(SSortHandle* pHandle, bool (*checkFn)(void* param), void* param); #ifdef __cplusplus } diff --git a/source/libs/executor/src/querytask.c b/source/libs/executor/src/querytask.c index 22d171e74a..980ef1a61a 100644 --- a/source/libs/executor/src/querytask.c +++ b/source/libs/executor/src/querytask.c @@ -59,7 +59,7 @@ SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOP return pTaskInfo; } -bool isTaskKilled(SExecTaskInfo* pTaskInfo) { return (0 != pTaskInfo->code); } +bool isTaskKilled(void* pTaskInfo) { return (0 != ((SExecTaskInfo*)pTaskInfo)->code); } void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode) { pTaskInfo->code = rspCode; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 71b0747be8..a3c5a76a7f 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2928,8 +2928,9 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_BLOCK_TS_MERGE, pInfo->bufPageSize, numOfBufPage, pInfo->pSortInputBlock, pTaskInfo->id.str, 0, 0, 0); - + tsortSetMergeLimit(pInfo->pSortHandle, mergeLimit); + tsortSetAbortCheckFn(pInfo->pSortHandle, isTaskKilled, pOperator->pTaskInfo); } tsortSetFetchRawDataFp(pInfo->pSortHandle, getBlockForTableMergeScan, NULL, NULL); @@ -2949,7 +2950,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* 
pOperator) { int32_t code = TSDB_CODE_SUCCESS; if (numOfTable == 1) { - setSingleTableMerge(pInfo->pSortHandle); + tsortSetSingleTableMerge(pInfo->pSortHandle); } else { code = tsortOpen(pInfo->pSortHandle); } diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index a1f83dda2f..9048dd43d7 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -1601,6 +1601,11 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { SSysTableScanInfo* pInfo = pOperator->info; char dbName[TSDB_DB_NAME_LEN] = {0}; + if (isTaskKilled(pOperator->pTaskInfo)) { + setOperatorCompleted(pOperator); + return NULL; + } + blockDataCleanup(pInfo->pRes); const char* name = tNameGetTableName(&pInfo->name); diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 1891e93c61..6c4a780dfb 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -71,12 +71,20 @@ struct SSortHandle { SMultiwayMergeTreeInfo* pMergeTree; bool singleTableMerge; + + bool (*abortCheckFn)(void* param); + void* abortCheckParam; }; -void setSingleTableMerge(SSortHandle* pHandle) { +void tsortSetSingleTableMerge(SSortHandle* pHandle) { pHandle->singleTableMerge = true; } +void tsortSetAbortCheckFn(SSortHandle *pHandle, bool (*checkFn)(void *), void* param) { + pHandle->abortCheckFn = checkFn; + pHandle->abortCheckParam = param; +} + static int32_t msortComparFn(const void* pLeft, const void* pRight, void* param); // | offset[0] | offset[1] |....| nullbitmap | data |...| @@ -726,11 +734,10 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t)); while (1) { - if (tsortIsClosed(pHandle)) { + if (tsortIsClosed(pHandle) || (pHandle->abortCheckFn && pHandle->abortCheckFn(pHandle->abortCheckParam))) { code = terrno = TSDB_CODE_TSC_QUERY_CANCELLED; return code; } - SSDataBlock* pDataBlock = 
getSortedBlockDataInner(pHandle, &pHandle->cmpParam, numOfRows); if (pDataBlock == NULL) { break; From 57d1957dee1d8a738dfe0f2fe3efd9716433a280 Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 14 Aug 2023 15:57:27 +0800 Subject: [PATCH 65/78] enhance: tag scan code refactoring --- source/libs/executor/inc/executorInt.h | 2 + source/libs/executor/src/scanoperator.c | 110 +++++++++--------------- 2 files changed, 43 insertions(+), 69 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 2b25feabb3..cb066d809c 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -263,6 +263,8 @@ typedef struct STagScanInfo { void* pCtbCursor; SNode* pTagCond; SNode* pTagIndexCond; + SArray* aUidTags; // SArray + SArray* aFilterIdxs; // SArray SStorageAPI* pStorageAPI; } STagScanInfo; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ac20bae167..71352b1c6e 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2813,7 +2813,7 @@ static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo // } else { // name is not retrieved during filter // pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); // } - STR_TO_VARSTR(str, "zsl"); + STR_TO_VARSTR(str, "ctbidx"); colDataSetVal(pColInfo, rowIndex, str, false); } else { @@ -2841,66 +2841,32 @@ static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo } } -static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aFilterIdxs, +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aFilterIdxs, bool ignoreFilterIdx, SStorageAPI* pAPI) { STagScanInfo* pInfo = pOperator->info; SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; - - size_t szTables = taosArrayGetSize(aFilterIdxs); - for 
(int i = 0; i < szTables; ++i) { - int32_t idx = *(int32_t*)taosArrayGet(aFilterIdxs, i); - STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, idx); - for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { - SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); - tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); + if (!ignoreFilterIdx) { + size_t szTables = taosArrayGetSize(aFilterIdxs); + for (int i = 0; i < szTables; ++i) { + int32_t idx = *(int32_t*)taosArrayGet(aFilterIdxs, i); + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, idx); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); + } } - } - return 0; -} - -#if 0 -static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, - SStorageAPI* pAPI) { - STagScanInfo* pInfo = pOperator->info; - SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; - - int32_t nTbls = taosArrayGetSize(aUidTags); - for (int i = 0; i < nTbls; ++i) { - STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, i); - for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { - SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); - - // refactor later - if (fmIsScanPseudoColumnFunc(pExprInfo[j].pExpr->_function.functionId)) { - char str[512]; - - STR_TO_VARSTR(str, "zsl"); - colDataSetVal(pDst, (i), str, false); - } else { // it is a tag value - STagVal val = {0}; - val.cid = pExprInfo[j].base.pParam[0].pCol->colId; - const char* p = pAPI->metaFn.extractTagVal(pUidTagInfo->pTagVal, pDst->info.type, &val); - - char* data = NULL; - if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) { - data = tTagValToData((const STagVal*)p, false); - } else { - data = (char*)p; 
- } - colDataSetVal(pDst, i, data, - (data == NULL) || (pDst->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data))); - - if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL && IS_VAR_DATA_TYPE(((const STagVal*)p)->type) && - data != NULL) { - taosMemoryFree(data); - } + } else { + size_t szTables = taosArrayGetSize(aUidTags); + for (int i = 0; i < szTables; ++i) { + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, i); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); } } } return 0; } -#endif - static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { @@ -2912,16 +2878,19 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { STagScanInfo* pInfo = pOperator->info; SSDataBlock* pRes = pInfo->pRes; blockDataCleanup(pRes); - int32_t count = 0; if (pInfo->pCtbCursor == NULL) { pInfo->pCtbCursor = pAPI->metaFn.openCtbCursor(pInfo->readHandle.vnode, pInfo->suid, 1); } - SArray* aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); - SArray* aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + SArray* aUidTags = pInfo->aUidTags; + SArray* aFilterIdxs = pInfo->aFilterIdxs; + int32_t count = 0; while (1) { + taosArrayClearEx(aUidTags, tagScanFreeUidTag); + taosArrayClear(aFilterIdxs); + int32_t numTables = 0; while (numTables < pOperator->resultInfo.capacity) { SMCtbCursor* pCur = pInfo->pCtbCursor; @@ -2939,34 +2908,29 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { if (numTables == 0) { break; } + bool ignoreFilterIdx = true; if (pInfo->pTagCond != NULL) { + ignoreFilterIdx = false; tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI); } else { - for (int i = 0; i < numTables; 
++i) { - taosArrayPush(aFilterIdxs, &i); - } + ignoreFilterIdx = true; } - tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, pAPI); - count = taosArrayGetSize(aFilterIdxs); + tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, ignoreFilterIdx, pAPI); + + count = ignoreFilterIdx ? taosArrayGetSize(aUidTags): taosArrayGetSize(aFilterIdxs); if (count != 0) { break; } - - taosArrayClearEx(aUidTags, tagScanFreeUidTag); - taosArrayClear(aFilterIdxs); } - - taosArrayDestroy(aFilterIdxs); - taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); - + pRes->info.rows = count; pOperator->resultInfo.totalRows += count; return (pRes->info.rows == 0) ? NULL : pInfo->pRes; } -static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { +static SSDataBlock* doTagScanFromMetaEntry(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; } @@ -3027,6 +2991,10 @@ static void destroyTagScanOperatorInfo(void* param) { if (pInfo->pCtbCursor != NULL) { pInfo->pStorageAPI->metaFn.closeCtbCursor(pInfo->pCtbCursor, 1); } + + taosArrayDestroy(pInfo->aFilterIdxs); + taosArrayDestroyEx(pInfo->aUidTags, tagScanFreeUidTag); + pInfo->pRes = blockDataDestroy(pInfo->pRes); taosArrayDestroy(pInfo->matchInfo.pList); pInfo->pTableListInfo = tableListDestroy(pInfo->pTableListInfo); @@ -3072,7 +3040,11 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi initResultSizeInfo(&pOperator->resultInfo, 4096); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); - __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? doTagScanFromCtbIdx : doTagScan; + if (pPhyNode->onlyMetaCtbIdx) { + pInfo->aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); + pInfo->aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + } + __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? 
doTagScanFromCtbIdx : doTagScanFromMetaEntry; pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, tagScanNextFn, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); From 60191bef9eb67c3ff2c7c2281d35b06e3178c145 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 14 Aug 2023 16:24:56 +0800 Subject: [PATCH 66/78] fix:tmq interface & remove snapshot code --- docs/en/14-reference/03-connector/04-java.mdx | 3 -- docs/examples/c/tmq_example.c | 6 --- docs/examples/go/sub/main.go | 1 - docs/zh/08-connector/10-cpp.mdx | 9 ++++ examples/c/tmq.c | 5 -- include/client/taos.h | 54 ++++++++----------- 6 files changed, 31 insertions(+), 47 deletions(-) diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx index 69bbd287ed..b12ef38ea8 100644 --- a/docs/en/14-reference/03-connector/04-java.mdx +++ b/docs/en/14-reference/03-connector/04-java.mdx @@ -1091,8 +1091,6 @@ public abstract class ConsumerLoop { config.setProperty("client.id", "1"); config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer"); config.setProperty("value.deserializer.encoding", "UTF-8"); - config.setProperty("experimental.snapshot.enable", "true"); - this.consumer = new TaosConsumer<>(config); this.topics = Collections.singletonList("topic_speed"); @@ -1176,7 +1174,6 @@ public abstract class ConsumerLoop { config.setProperty("client.id", "1"); config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer"); config.setProperty("value.deserializer.encoding", "UTF-8"); - config.setProperty("experimental.snapshot.enable", "true"); this.consumer = new TaosConsumer<>(config); this.topics = Collections.singletonList("topic_speed"); diff --git a/docs/examples/c/tmq_example.c b/docs/examples/c/tmq_example.c index d958428b8f..ca7c23e93b 100644 --- a/docs/examples/c/tmq_example.c +++ b/docs/examples/c/tmq_example.c @@ -227,12 +227,6 @@ tmq_t* build_consumer() { 
return NULL; } - code = tmq_conf_set(conf, "experimental.snapshot.enable", "false"); - if (TMQ_CONF_OK != code) { - tmq_conf_destroy(conf); - return NULL; - } - tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); diff --git a/docs/examples/go/sub/main.go b/docs/examples/go/sub/main.go index cb24e351ab..ed335cfdea 100644 --- a/docs/examples/go/sub/main.go +++ b/docs/examples/go/sub/main.go @@ -35,7 +35,6 @@ func main() { "td.connect.port": "6030", "client.id": "test_tmq_client", "enable.auto.commit": "false", - "experimental.snapshot.enable": "true", "msg.with.table.name": "true", }) if err != nil { diff --git a/docs/zh/08-connector/10-cpp.mdx b/docs/zh/08-connector/10-cpp.mdx index 9c5095f09c..53c4bca755 100644 --- a/docs/zh/08-connector/10-cpp.mdx +++ b/docs/zh/08-connector/10-cpp.mdx @@ -515,3 +515,12 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多 - 带_raw的接口通过传递的参数lines指针和长度len来表示数据,为了解决原始接口数据包含'\0'而被截断的问题。totalRows指针返回解析出来的数据行数。 - 带_ttl的接口可以传递ttl参数来控制建表的ttl到期时间。 - 带_reqid的接口可以通过传递reqid参数来追踪整个的调用链。 + +### 数据订阅 API + +除了使用 SQL 方式或者使用参数绑定 API 写入数据外,还可以使用 Schemaless 的方式完成写入。Schemaless 可以免于预先创建超级表/数据子表的数据结构,而是可以直接写入数据,TDengine 系统会根据写入的数据内容自动创建和维护所需要的表结构。Schemaless 的使用方式详见 [Schemaless 写入](/reference/schemaless/) 章节,这里介绍与之配套使用的 C/C++ API。 + +- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)` + +**功能说明** +该接口将行协议的文本数据写入到 TDengine 中。 diff --git a/examples/c/tmq.c b/examples/c/tmq.c index e1133c109e..136c54b874 100644 --- a/examples/c/tmq.c +++ b/examples/c/tmq.c @@ -228,11 +228,6 @@ tmq_t* build_consumer() { tmq_conf_destroy(conf); return NULL; } - code = tmq_conf_set(conf, "experimental.snapshot.enable", "false"); - if (TMQ_CONF_OK != code) { - tmq_conf_destroy(conf); - return NULL; - } tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); tmq = tmq_consumer_new(conf, NULL, 0); diff --git a/include/client/taos.h b/include/client/taos.h index 
3cc2d907ab..a8136461f8 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -260,19 +260,14 @@ typedef struct tmq_t tmq_t; typedef struct tmq_conf_t tmq_conf_t; typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); +typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); -DLL_EXPORT tmq_list_t *tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); -DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); +typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +} tmq_conf_res_t; -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); - -DLL_EXPORT const char *tmq_err2str(int32_t code); - -/* ------------------------TMQ CONSUMER INTERFACE------------------------ */ typedef struct tmq_topic_assignment { int32_t vgId; int64_t currentOffset; @@ -280,6 +275,18 @@ typedef struct tmq_topic_assignment { int64_t end; } tmq_topic_assignment; +DLL_EXPORT tmq_conf_t *tmq_conf_new(); +DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); +DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); +DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + +DLL_EXPORT tmq_list_t *tmq_list_new(); +DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); +DLL_EXPORT void tmq_list_destroy(tmq_list_t *); +DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); +DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); + +DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); @@ -289,34 +296,17 @@ 
DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); -DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, - int32_t *numOfAssignment); +DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment); DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment); DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); +DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); +DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res); DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); -DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); -DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); - -/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */ - -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; - -typedef enum tmq_conf_res_t tmq_conf_res_t; - -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); - -/* -------------------------TMQ MSG HANDLE 
INTERFACE---------------------- */ +DLL_EXPORT const char *tmq_err2str(int32_t code); /* ------------------------------ TAOSX -----------------------------------*/ // note: following apis are unstable From f9c897221cc66cd49bfeaa0905e802a305aece6a Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 14 Aug 2023 16:30:31 +0800 Subject: [PATCH 67/78] fix: move the setting of onlyCtbIdx to front end --- source/libs/executor/src/operator.c | 1 - source/libs/planner/src/planPhysiCreater.c | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index abef8298e5..7f0c5baa36 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -371,7 +371,6 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; - pTagScanPhyNode->onlyMetaCtbIdx = false; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 06859e195d..8efa9c1048 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -511,6 +511,20 @@ static int32_t createSimpleScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSub return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, pScan, pPhyNode); } +static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, + SPhysiNode** pPhyNode) { + STagScanPhysiNode* pScan = + (STagScanPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pScanLogicNode, 
QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN); + if (NULL == pScan) { + return TSDB_CODE_OUT_OF_MEMORY; + } + vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); + + pScan->onlyMetaCtbIdx = false; + + return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); +} + static int32_t createLastRowScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, SPhysiNode** pPhyNode) { SLastRowScanPhysiNode* pScan = @@ -646,6 +660,7 @@ static int32_t createScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, pCxt->hasScan = true; switch (pScanLogicNode->scanType) { case SCAN_TYPE_TAG: + return createTagScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode); case SCAN_TYPE_BLOCK_INFO: return createSimpleScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode); case SCAN_TYPE_TABLE_COUNT: From 8d0461e98ca13a58c4bee1b9964a1ee3920b5346 Mon Sep 17 00:00:00 2001 From: kailixu Date: Mon, 14 Aug 2023 16:31:58 +0800 Subject: [PATCH 68/78] fix: use taos_static for tmq_sim on windows --- cmake/cmake.define | 6 ++++++ tests/script/wtest.bat | 14 +++++++++++--- utils/test/c/CMakeLists.txt | 2 +- utils/tsim/CMakeLists.txt | 2 +- 4 files changed, 19 insertions(+), 5 deletions(-) diff --git a/cmake/cmake.define b/cmake/cmake.define index cf7f450994..6f4153c7d0 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -78,6 +78,12 @@ ELSE () SET(TD_TAOS_TOOLS TRUE) ENDIF () +IF (${TD_WINDOWS}) + SET(TAOS_LIB taos_static) +ELSE () + SET(TAOS_LIB taos) +ENDIF () + IF (TD_WINDOWS) MESSAGE("${Yellow} set compiler flag for Windows! 
${ColourReset}") SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd") diff --git a/tests/script/wtest.bat b/tests/script/wtest.bat index b642bad285..88ae703b7c 100644 --- a/tests/script/wtest.bat +++ b/tests/script/wtest.bat @@ -17,6 +17,9 @@ rem echo SIM_DIR: %SIM_DIR% set "TSIM_DIR=%SIM_DIR%tsim\" rem echo TSIM_DIR: %TSIM_DIR% +set "DATA_DIR=%TSIM_DIR%data\" +rem echo DATA_DIR: %DATA_DIR% + set "CFG_DIR=%TSIM_DIR%cfg\" rem echo CFG_DIR: %CFG_DIR% @@ -30,25 +33,30 @@ if not exist %SIM_DIR% mkdir %SIM_DIR% if not exist %TSIM_DIR% mkdir %TSIM_DIR% if exist %CFG_DIR% rmdir /s/q %CFG_DIR% if exist %LOG_DIR% rmdir /s/q %LOG_DIR% +if exist %DATA_DIR% rmdir /s/q %DATA_DIR% if not exist %CFG_DIR% mkdir %CFG_DIR% if not exist %LOG_DIR% mkdir %LOG_DIR% +if not exist %DATA_DIR% mkdir %DATA_DIR% set "fqdn=localhost" for /f "skip=1" %%A in ( 'wmic computersystem get caption' ) do if not defined fqdn set "fqdn=%%A" -echo firstEp %fqdn% > %TAOS_CFG% +echo firstEp %fqdn%:7100 > %TAOS_CFG% +echo secondEp %fqdn%:7200 >> %TAOS_CFG% echo fqdn %fqdn% >> %TAOS_CFG% echo serverPort 7100 >> %TAOS_CFG% +echo dataDir %DATA_DIR% >> %TAOS_CFG% echo logDir %LOG_DIR% >> %TAOS_CFG% echo scriptDir %SCRIPT_DIR% >> %TAOS_CFG% echo numOfLogLines 100000000 >> %TAOS_CFG% echo rpcDebugFlag 143 >> %TAOS_CFG% echo tmrDebugFlag 131 >> %TAOS_CFG% -echo cDebugFlag 135 >> %TAOS_CFG% +echo cDebugFlag 143 >> %TAOS_CFG% echo qDebugFlag 143 >> %TAOS_CFG% -echo udebugFlag 135 >> %TAOS_CFG% +echo uDebugFlag 143 >> %TAOS_CFG% +echo debugFlag 143 >> %TAOS_CFG% echo wal 0 >> %TAOS_CFG% echo asyncLog 0 >> %TAOS_CFG% echo locale en_US.UTF-8 >> %TAOS_CFG% diff --git a/utils/test/c/CMakeLists.txt b/utils/test/c/CMakeLists.txt index 3f52fc8e5d..b96814c13b 100644 --- a/utils/test/c/CMakeLists.txt +++ b/utils/test/c/CMakeLists.txt @@ -31,7 +31,7 @@ target_link_libraries( ) target_link_libraries( tmq_sim - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os diff --git a/utils/tsim/CMakeLists.txt 
b/utils/tsim/CMakeLists.txt index c2cf7ac3c5..209982c659 100644 --- a/utils/tsim/CMakeLists.txt +++ b/utils/tsim/CMakeLists.txt @@ -2,7 +2,7 @@ aux_source_directory(src TSIM_SRC) add_executable(tsim ${TSIM_SRC}) target_link_libraries( tsim - PUBLIC taos_static + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os From 62cad7c54f9a1784a03949c152c0bdb684f21316 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 14 Aug 2023 17:16:11 +0800 Subject: [PATCH 69/78] docs:tmq interface --- docs/en/07-develop/07-tmq.mdx | 88 ++++++++++++++++++++++----------- docs/zh/07-develop/07-tmq.mdx | 88 +++++++++++++++++++++++---------- docs/zh/08-connector/10-cpp.mdx | 9 ---- 3 files changed, 121 insertions(+), 64 deletions(-) diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx index ccf39ef581..3326164f49 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -23,7 +23,20 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers. -Tips: Data subscription is to consume data from the wal. 
If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products. +Tips:(c interface for example) +1. A consumption group consumes all data under the same topic, and different consumption groups are independent of each other; +2. A consumption group consumes all vgroups of the same topic, which can be composed of multiple consumers, but a vgroup is only consumed by one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data; +3. On the server side, only one offset is saved for each vgroup, and the offsets for each vgroup are monotonically increasing, but not necessarily continuous. There is no correlation between the offsets of various vgroups; +4. Each poll server will return a result block, which belongs to a vgroup and may contain data from multiple versions of wal. This block can be accessed through tmq_get_vgroup_offset. The offset interface obtains the offset of the first record in the block; +5. If a consumer group has never committed an offset, when its member consumers restart and pull data again, they start consuming from the set value of the parameter auto.offset.reset; In a consumer lifecycle, the client locally records the offset of the most recent pull data and will not pull duplicate data; +6. If a consumer terminates abnormally (without calling tmq_close), they need to wait for about 12 seconds to trigger their consumer group rebalance. 
The consumer's status on the server will change to LOST, and after about 1 day, the consumer will be automatically deleted; Exit normally, and after exiting, the consumer will be deleted; Add a new consumer, wait for about 2 seconds to trigger Rebalance, and the consumer's status on the server will change to ready; +7. The consumer group Rebalance will reassign Vgroups to all consumer members in the ready state of the group, and consumers can only assign/see/commit/poll operations to the Vgroups they are responsible for; +8. Consumers can tmq_position to obtain the offset of the current consumption, seek to the specified offset, and consume again; +9. Seek points the position to the specified offset without executing the commit operation. Once the seek is successful, it can poll the specified offset and subsequent data; +10. Before the seek operation, tmq must be call tmq_get_topic_assignment, The assignment interface obtains the vgroup ID and offset range of the consumer. The seek operation will detect whether the vgroup ID and offset are legal, and if they are illegal, an error will be reported; +11. Due to the existence of a WAL expiration deletion mechanism, even if the seek operation is successful, it is possible that the offset has expired when polling data. If the offset of poll is less than the WAL minimum version number, it will be consumed from the WAL minimum version number; +12. The tmq_get_vgroup_offset interface obtains the offset of the first data in the result block where the record is located. When seeking to this offset, it will consume all the data in this block. Refer to point four; +13. Data subscription is to consume data from the wal. If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. 
So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products. ## Data Schema and API @@ -33,40 +46,59 @@ The related schemas and APIs in various languages are described as follows: ```c -typedef struct tmq_t tmq_t; -typedef struct tmq_conf_t tmq_conf_t; -typedef struct tmq_list_t tmq_list_t; + typedef struct tmq_t tmq_t; + typedef struct tmq_conf_t tmq_conf_t; + typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); + typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); -DLL_EXPORT tmq_list_t *tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); -DLL_EXPORT const char *tmq_err2str(int32_t code); + typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +} tmq_conf_res_t; -DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); -DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); -DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); -DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); -DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); -DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + typedef struct tmq_topic_assignment { + int32_t vgId; + int64_t currentOffset; + int64_t begin; + int64_t end; +} tmq_topic_assignment; -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; -typedef enum tmq_conf_res_t tmq_conf_res_t; + DLL_EXPORT tmq_conf_t *tmq_conf_new(); + DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const 
char *key, const char *value); + DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); + DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + DLL_EXPORT tmq_list_t *tmq_list_new(); + DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); + DLL_EXPORT void tmq_list_destroy(tmq_list_t *); + DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); + DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); + + DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); + DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); + DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); + DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); + DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); + DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); + DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); + DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment); + DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment); + DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); + 
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); + + DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); + DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res); + DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); + DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); + DLL_EXPORT const char *tmq_err2str(int32_t code);DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); ``` -For more information, see [C/C++ Connector](/reference/connector/cpp). - The following example is based on the smart meter table described in Data Models. For complete sample code, see the C language section below. diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx index 38b91d7cea..6852d5551e 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.mdx @@ -25,7 +25,20 @@ import CDemo from "./_sub_c.mdx"; 本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。 -注意:数据订阅是从 WAL 消费数据,如果一些 WAL 文件被基于 WAL 保留策略删除,则已经删除的 WAL 文件中的数据就无法再消费到。需要根据业务需要在创建数据库时合理设置 `WAL_RETENTION_PERIOD` 或 `WAL_RETENTION_SIZE` ,并确保应用及时消费数据,这样才不会产生数据丢失的现象。数据订阅的行为与 Kafka 等广泛使用的消息队列类产品的行为相似。 +说明(以c接口为例): +1. 一个消费组消费同一个topic下的所有数据,不同消费组之间相互独立; +2. 一个消费组消费同一个topic所有的vgroup,消费组可由多个消费者组成,但一个vgroup仅被一个消费者消费,如果消费者数量超过了vgroup数量,多余的消费者不消费数据; +3. 在服务端每个vgroup仅保存一个offset,每个vgroup的offset是单调递增的,但不一定连续。各个vgroup的offset之间没有关联; +4. 每次poll服务端会返回一个结果block,该block属于一个vgroup,可能包含多个wal版本的数据,可以通过 tmq_get_vgroup_offset 接口获得是该block第一条记录的offset; +5. 一个消费组如果从未commit过offset,当其成员消费者重启重新拉取数据时,均从参数auto.offset.reset设定值开始消费;在一个消费者生命周期中,客户端本地记录了最近一次拉取数据的offset,不会拉取重复数据; +6. 消费者如果异常终止(没有调用tmq_close),需等约12秒后触发其所属消费组rebalance,该消费者在服务端状态变为LOST,约1天后该消费者自动被删除;正常退出,退出后就会删除消费者;新增消费者,需等约2秒触发rebalance,该消费者在服务端状态变为ready; +7. 消费组rebalance会对该组所有ready状态的消费者成员重新进行vgroup分配,消费者仅能对自己负责的vgroup进行assignment/seek/commit/poll操作; +8. 消费者可利用 tmq_position 获得当前消费的offset,并seek到指定offset,重新消费; +9. seek将position指向指定offset,不执行commit操作,一旦seek成功,可poll拉取指定offset及以后的数据; +10. 
seek 操作之前须调用 tmq_get_topic_assignment 接口获取该consumer的vgroup ID和offset范围。seek 操作会检测vgroup ID 和 offset是否合法,如非法将报错; +11. tmq_get_vgroup_offset接口获取的是记录所在结果block块里的第一条数据的offset,当seek至该offset时,将消费到这个block里的全部数据。参见第四点; +12. 由于存在 WAL 过期删除机制,即使seek 操作成功,poll数据时有可能offset已失效。如果poll 的offset 小于 WAL 最小版本号,将会从WAL最小版本号消费; +13. 数据订阅是从 WAL 消费数据,如果一些 WAL 文件被基于 WAL 保留策略删除,则已经删除的 WAL 文件中的数据就无法再消费到。需要根据业务需要在创建数据库时合理设置 `WAL_RETENTION_PERIOD` 或 `WAL_RETENTION_SIZE` ,并确保应用及时消费数据,这样才不会产生数据丢失的现象。数据订阅的行为与 Kafka 等广泛使用的消息队列类产品的行为相似; ## 主要数据结构和 API @@ -35,39 +48,60 @@ import CDemo from "./_sub_c.mdx"; ```c -typedef struct tmq_t tmq_t; -typedef struct tmq_conf_t tmq_conf_t; -typedef struct tmq_list_t tmq_list_t; + typedef struct tmq_t tmq_t; + typedef struct tmq_conf_t tmq_conf_t; + typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); + typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); -DLL_EXPORT tmq_list_t *tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); -DLL_EXPORT const char *tmq_err2str(int32_t code); + typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +} tmq_conf_res_t; -DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); -DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); -DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); -DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); -DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); -DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + typedef struct tmq_topic_assignment { + int32_t vgId; + int64_t currentOffset; + int64_t begin; + int64_t end; +} tmq_topic_assignment; -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; 
-typedef enum tmq_conf_res_t tmq_conf_res_t; + DLL_EXPORT tmq_conf_t *tmq_conf_new(); + DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); + DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); + DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + DLL_EXPORT tmq_list_t *tmq_list_new(); + DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); + DLL_EXPORT void tmq_list_destroy(tmq_list_t *); + DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); + DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); + + DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); + DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); + DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); + DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); + DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); + DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); + DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); + DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment); + DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment); + DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, 
const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); + DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); + + DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); + DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res); + DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); + DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); + DLL_EXPORT const char *tmq_err2str(int32_t code);DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); ``` -这些 API 的文档请见 [C/C++ Connector](../../connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。 +下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。 diff --git a/docs/zh/08-connector/10-cpp.mdx b/docs/zh/08-connector/10-cpp.mdx index 53c4bca755..9c5095f09c 100644 --- a/docs/zh/08-connector/10-cpp.mdx +++ b/docs/zh/08-connector/10-cpp.mdx @@ -515,12 +515,3 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多 - 带_raw的接口通过传递的参数lines指针和长度len来表示数据,为了解决原始接口数据包含'\0'而被截断的问题。totalRows指针返回解析出来的数据行数。 - 带_ttl的接口可以传递ttl参数来控制建表的ttl到期时间。 - 带_reqid的接口可以通过传递reqid参数来追踪整个的调用链。 - -### 数据订阅 API - -除了使用 SQL 方式或者使用参数绑定 API 写入数据外,还可以使用 Schemaless 的方式完成写入。Schemaless 可以免于预先创建超级表/数据子表的数据结构,而是可以直接写入数据,TDengine 系统会根据写入的数据内容自动创建和维护所需要的表结构。Schemaless 的使用方式详见 [Schemaless 写入](/reference/schemaless/) 章节,这里介绍与之配套使用的 C/C++ API。 - -- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)` - -**功能说明** -该接口将行协议的文本数据写入到 TDengine 中。 From 90cb67d006693a86cfc52b8675f2a98f451be537 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Mon, 14 Aug 2023 17:33:13 +0800 Subject: [PATCH 70/78] fix: table count issue --- tests/develop-test/2-query/table_count_scan.py | 16 ++++++++-------- tests/script/tsim/query/sys_tbname.sim | 2 +- tests/script/tsim/query/tableCount.sim | 6 +++--- tests/system-test/0-others/information_schema.py | 2 +- 4 files changed, 13 
insertions(+), 13 deletions(-) diff --git a/tests/develop-test/2-query/table_count_scan.py b/tests/develop-test/2-query/table_count_scan.py index 758d28948d..d6e49964eb 100644 --- a/tests/develop-test/2-query/table_count_scan.py +++ b/tests/develop-test/2-query/table_count_scan.py @@ -65,7 +65,7 @@ class TDTestCase: tdSql.query('select count(*),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;') tdSql.checkRows(3) - tdSql.checkData(0, 0, 24) + tdSql.checkData(0, 0, 23) tdSql.checkData(0, 1, 'information_schema') tdSql.checkData(0, 2, None) tdSql.checkData(1, 0, 3) @@ -77,7 +77,7 @@ class TDTestCase: tdSql.query('select count(1) v,db_name, stable_name from information_schema.ins_tables group by db_name, stable_name order by v desc;') tdSql.checkRows(3) - tdSql.checkData(0, 0, 24) + tdSql.checkData(0, 0, 23) tdSql.checkData(0, 1, 'information_schema') tdSql.checkData(0, 2, None) tdSql.checkData(1, 0, 5) @@ -93,7 +93,7 @@ class TDTestCase: tdSql.checkData(1, 1, 'performance_schema') tdSql.checkData(0, 0, 3) tdSql.checkData(0, 1, 'tbl_count') - tdSql.checkData(2, 0, 24) + tdSql.checkData(2, 0, 23) tdSql.checkData(2, 1, 'information_schema') tdSql.query("select count(*) from information_schema.ins_tables where db_name='tbl_count'") @@ -106,7 +106,7 @@ class TDTestCase: tdSql.query('select count(*) from information_schema.ins_tables') tdSql.checkRows(1) - tdSql.checkData(0, 0, 32) + tdSql.checkData(0, 0, 31) tdSql.execute('create table stba (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);') @@ -189,7 +189,7 @@ class TDTestCase: tdSql.checkData(2, 0, 5) tdSql.checkData(2, 1, 'performance_schema') tdSql.checkData(2, 2, None) - tdSql.checkData(3, 0, 24) + tdSql.checkData(3, 0, 23) tdSql.checkData(3, 1, 'information_schema') tdSql.checkData(3, 2, 
None) @@ -204,7 +204,7 @@ class TDTestCase: tdSql.checkData(2, 0, 5) tdSql.checkData(2, 1, 'performance_schema') tdSql.checkData(2, 2, None) - tdSql.checkData(3, 0, 24) + tdSql.checkData(3, 0, 23) tdSql.checkData(3, 1, 'information_schema') tdSql.checkData(3, 2, None) @@ -215,7 +215,7 @@ class TDTestCase: tdSql.checkData(0, 1, 'tbl_count') tdSql.checkData(1, 0, 5) tdSql.checkData(1, 1, 'performance_schema') - tdSql.checkData(2, 0, 24) + tdSql.checkData(2, 0, 23) tdSql.checkData(2, 1, 'information_schema') tdSql.query("select count(*) from information_schema.ins_tables where db_name='tbl_count'") @@ -228,7 +228,7 @@ class TDTestCase: tdSql.query('select count(*) from information_schema.ins_tables') tdSql.checkRows(1) - tdSql.checkData(0, 0, 33) + tdSql.checkData(0, 0, 32) tdSql.execute('drop database tbl_count') diff --git a/tests/script/tsim/query/sys_tbname.sim b/tests/script/tsim/query/sys_tbname.sim index f49a8e0a7d..6ed978aa15 100644 --- a/tests/script/tsim/query/sys_tbname.sim +++ b/tests/script/tsim/query/sys_tbname.sim @@ -58,7 +58,7 @@ endi sql select tbname from information_schema.ins_tables; print $rows $data00 -if $rows != 33 then +if $rows != 32 then return -1 endi if $data00 != @ins_tables@ then diff --git a/tests/script/tsim/query/tableCount.sim b/tests/script/tsim/query/tableCount.sim index 6e65852dcc..524b43620c 100644 --- a/tests/script/tsim/query/tableCount.sim +++ b/tests/script/tsim/query/tableCount.sim @@ -53,7 +53,7 @@ sql select stable_name,count(table_name) from information_schema.ins_tables grou if $rows != 3 then return -1 endi -if $data01 != 30 then +if $data01 != 29 then return -1 endi if $data11 != 10 then @@ -72,7 +72,7 @@ endi if $data11 != 5 then return -1 endi -if $data21 != 24 then +if $data21 != 23 then return -1 endi if $data31 != 5 then @@ -97,7 +97,7 @@ endi if $data42 != 3 then return -1 endi -if $data52 != 24 then +if $data52 != 23 then return -1 endi if $data62 != 5 then diff --git 
a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py index 762361f051..f7788d1d50 100644 --- a/tests/system-test/0-others/information_schema.py +++ b/tests/system-test/0-others/information_schema.py @@ -55,7 +55,7 @@ class TDTestCase: ] self.binary_str = 'taosdata' self.nchar_str = '涛思数据' - self.ins_list = ['ins_dnodes','ins_mnodes','ins_modules','ins_qnodes','ins_snodes','ins_cluster','ins_databases','ins_functions',\ + self.ins_list = ['ins_dnodes','ins_mnodes','ins_qnodes','ins_snodes','ins_cluster','ins_databases','ins_functions',\ 'ins_indexes','ins_stables','ins_tables','ins_tags','ins_columns','ins_users','ins_grants','ins_vgroups','ins_configs','ins_dnode_variables',\ 'ins_topics','ins_subscriptions','ins_streams','ins_stream_tasks','ins_vnodes','ins_user_privileges'] self.perf_list = ['perf_connections','perf_queries','perf_consumers','perf_trans','perf_apps'] From 012248b68142496f0e8e2fa1833b4df4912b2c82 Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 14 Aug 2023 19:26:53 +0800 Subject: [PATCH 71/78] fix: move the only ctb idx flag to logical plan --- include/libs/nodes/plannodes.h | 1 + source/libs/nodes/src/nodesCodeFuncs.c | 10 ++++++++-- source/libs/planner/src/planOptimizer.c | 3 +++ source/libs/planner/src/planPhysiCreater.c | 2 +- 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 0830dc4918..3e24e417fc 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -107,6 +107,7 @@ typedef struct SScanLogicNode { bool sortPrimaryKey; bool igLastNull; bool groupOrderScan; + bool onlyMetaCtbIdx; // for tag scan with no tbname } SScanLogicNode; typedef struct SJoinLogicNode { diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index a2de0bc63a..4dfc55c0fa 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -660,6 
+660,7 @@ static const char* jkScanLogicPlanDynamicScanFuncs = "DynamicScanFuncs"; static const char* jkScanLogicPlanDataRequired = "DataRequired"; static const char* jkScanLogicPlanTagCond = "TagCond"; static const char* jkScanLogicPlanGroupTags = "GroupTags"; +static const char* jkScanLogicPlanOnlyMetaCtbIdx = "OnlyMetaCtbIdx"; static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) { const SScanLogicNode* pNode = (const SScanLogicNode*)pObj; @@ -701,7 +702,9 @@ static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = nodeListToJson(pJson, jkScanLogicPlanGroupTags, pNode->pGroupTags); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkScanLogicPlanOnlyMetaCtbIdx, pNode->onlyMetaCtbIdx); + } return code; } @@ -746,7 +749,10 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeList(pJson, jkScanLogicPlanGroupTags, &pNode->pGroupTags); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkScanLogicPlanOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); + } + return code; } diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 16440be511..6944fc9f18 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -2679,6 +2679,9 @@ static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubp } nodesDestroyNode((SNode*)pAgg); tagScanOptCloneAncestorSlimit((SLogicNode*)pScanNode); + + pScanNode->onlyMetaCtbIdx = false; + pCxt->optimized = true; return TSDB_CODE_SUCCESS; } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 8efa9c1048..5f78b5de9c 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -520,7 +520,7 @@ static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubpla 
} vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); - pScan->onlyMetaCtbIdx = false; + pScan->onlyMetaCtbIdx = pScanLogicNode->onlyMetaCtbIdx; return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); } From 8639c22cc0cd5e684710fe4dfda644a6e0362e54 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Aug 2023 14:53:58 +0800 Subject: [PATCH 72/78] vnode, common: USE_COS def --- source/common/CMakeLists.txt | 8 ++++++++ source/dnode/vnode/CMakeLists.txt | 4 ++++ 2 files changed, 12 insertions(+) diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt index 356ea2be1c..b010467f20 100644 --- a/source/common/CMakeLists.txt +++ b/source/common/CMakeLists.txt @@ -16,6 +16,14 @@ ENDIF () IF (TD_STORAGE) ADD_DEFINITIONS(-D_STORAGE) TARGET_LINK_LIBRARIES(common PRIVATE storage) + + IF(${TD_LINUX}) + IF(${BUILD_WITH_COS}) + add_definitions(-DUSE_COS) + ENDIF(${BUILD_WITH_COS}) + + ENDIF(${TD_LINUX}) + ENDIF () target_include_directories( diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 052b6be37f..c70df86e20 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -189,6 +189,10 @@ target_include_directories( PUBLIC "$ENV{HOME}/.cos-local.1/include" ) +if(${BUILD_WITH_COS}) + add_definitions(-DUSE_COS) +endif(${BUILD_WITH_COS}) + endif(${TD_LINUX}) IF (TD_GRANT) From bd758e0269786711de840303a921b93b4e097d2c Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Aug 2023 15:14:34 +0800 Subject: [PATCH 73/78] retention: remove old files last --- source/dnode/vnode/src/inc/vndCos.h | 2 +- source/dnode/vnode/src/tsdb/tsdbRetention.c | 23 +++++++++++---------- source/dnode/vnode/src/vnd/vnodeCos.c | 11 ++++++++-- 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h index f6db7f096e..cf2c5eb441 100644 --- 
a/source/dnode/vnode/src/inc/vndCos.h +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -26,7 +26,7 @@ extern int8_t tsS3Enabled; int32_t s3Init(); void s3CleanUp(); -void s3PutObjectFromFile(const char *file, const char *object); +int32_t s3PutObjectFromFile(const char *file, const char *object); void s3DeleteObjects(const char *object_name[], int nobject); bool s3Exists(const char *object_name); bool s3Get(const char *object_name, const char *path); diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c index ebe20c0e85..46a5d19a1a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRetention.c +++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c @@ -114,7 +114,8 @@ static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile TSDB_CHECK_CODE(code, lino, _exit); char *object_name = taosDirEntryBaseName(fname); - s3PutObjectFromFile(from->fname, object_name); + code = s3PutObjectFromFile(from->fname, object_name); + TSDB_CHECK_CODE(code, lino, _exit); taosCloseFile(&fdFrom); @@ -178,16 +179,6 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const int32_t lino = 0; STFileOp op = {0}; - // remove old - op = (STFileOp){ - .optype = TSDB_FOP_REMOVE, - .fid = fobj->f->fid, - .of = fobj->f[0], - }; - - code = TARRAY2_APPEND(rtner->fopArr, op); - TSDB_CHECK_CODE(code, lino, _exit); - // create new op = (STFileOp){ .optype = TSDB_FOP_CREATE, @@ -213,6 +204,16 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const code = tsdbCopyFileS3(rtner, fobj, &op.nf); TSDB_CHECK_CODE(code, lino, _exit); + // remove old + op = (STFileOp){ + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + _exit: if (code) { TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index 
b28b7ad747..02021831bf 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -51,7 +51,8 @@ static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) { options->ctl = cos_http_controller_create(options->pool, 0); } -void s3PutObjectFromFile(const char *file_str, const char *object_str) { +int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) { + int32_t code = 0; cos_pool_t *p = NULL; int is_cname = 0; cos_status_t *s = NULL; @@ -76,6 +77,12 @@ void s3PutObjectFromFile(const char *file_str, const char *object_str) { log_status(s); cos_pool_destroy(p); + + if (s->code != 200) { + return code = s->code; + } + + return code; } void s3DeleteObjects(const char *object_name[], int nobject) { @@ -300,7 +307,7 @@ long s3Size(const char *object_name) { int32_t s3Init() { return 0; } void s3CleanUp() {} -void s3PutObjectFromFile(const char *file, const char *object) {} +int32_t s3PutObjectFromFile(const char *file, const char *object) { return 0; } void s3DeleteObjects(const char *object_name[], int nobject) {} bool s3Exists(const char *object_name) { return false; } bool s3Get(const char *object_name, const char *path) { return false; } From c4e5cfd2fbb0d2a85d141ee57d6e03b095c31061 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Aug 2023 15:40:40 +0800 Subject: [PATCH 74/78] retention: return code from copy --- source/dnode/vnode/src/tsdb/tsdbRetention.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c index 46a5d19a1a..267e8b4117 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRetention.c +++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c @@ -179,6 +179,16 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const int32_t lino = 0; STFileOp op = {0}; + // remove old + op = (STFileOp){ + .optype = TSDB_FOP_REMOVE, + .fid = 
fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + // create new op = (STFileOp){ .optype = TSDB_FOP_CREATE, @@ -204,16 +214,6 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const code = tsdbCopyFileS3(rtner, fobj, &op.nf); TSDB_CHECK_CODE(code, lino, _exit); - // remove old - op = (STFileOp){ - .optype = TSDB_FOP_REMOVE, - .fid = fobj->f->fid, - .of = fobj->f[0], - }; - - code = TARRAY2_APPEND(rtner->fopArr, op); - TSDB_CHECK_CODE(code, lino, _exit); - _exit: if (code) { TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); From 84e472ad03b71aabd88670184d1de52c9b85dd3e Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 15 Aug 2023 16:10:54 +0800 Subject: [PATCH 75/78] enhance: tag cond col list only once and tag scan derive from scan --- include/libs/nodes/plannodes.h | 10 +---- source/libs/command/src/explain.c | 16 +++---- source/libs/executor/inc/executorInt.h | 7 +++ source/libs/executor/src/scanoperator.c | 36 +++++++-------- source/libs/nodes/src/nodesCloneFuncs.c | 9 +--- source/libs/nodes/src/nodesCodeFuncs.c | 48 ++------------------ source/libs/nodes/src/nodesMsgFuncs.c | 58 +++---------------------- 7 files changed, 41 insertions(+), 143 deletions(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 3e24e417fc..4529520ace 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -336,15 +336,7 @@ typedef struct SScanPhysiNode { } SScanPhysiNode; typedef struct STagScanPhysiNode { - // SScanPhysiNode scan; //TODO? - SPhysiNode node; - SNodeList* pScanCols; - SNodeList* pScanPseudoCols; - uint64_t uid; // unique id of the table - uint64_t suid; - int8_t tableType; - SName tableName; - bool groupOrderScan; + SScanPhysiNode scan; bool onlyMetaCtbIdx; //no tbname, tag index not used. 
} STagScanPhysiNode; diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index e917de33dd..e167b31ef8 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -291,17 +291,17 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i switch (pNode->type) { case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: { STagScanPhysiNode *pTagScanNode = (STagScanPhysiNode *)pNode; - EXPLAIN_ROW_NEW(level, EXPLAIN_TAG_SCAN_FORMAT, pTagScanNode->tableName.tname); + EXPLAIN_ROW_NEW(level, EXPLAIN_TAG_SCAN_FORMAT, pTagScanNode->scan.tableName.tname); EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); if (pResNode->pExecInfo) { QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); } - if (pTagScanNode->pScanPseudoCols) { - EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->pScanPseudoCols->length); + if (pTagScanNode->scan.pScanPseudoCols) { + EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->scan.pScanPseudoCols->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); } - EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->scan.node.pOutputDataBlockDesc->totalRowSize); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); @@ -309,11 +309,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i if (verbose) { EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, - nodesGetOutputNumFromSlotList(pTagScanNode->node.pOutputDataBlockDesc->pSlots)); + nodesGetOutputNumFromSlotList(pTagScanNode->scan.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); - EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->outputRowSize); - 
EXPLAIN_ROW_APPEND_LIMIT(pTagScanNode->node.pLimit); - EXPLAIN_ROW_APPEND_SLIMIT(pTagScanNode->node.pSlimit); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->scan.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pTagScanNode->scan.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pTagScanNode->scan.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index cb066d809c..dad15dc6bc 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -251,6 +251,12 @@ typedef struct STableMergeScanInfo { SSortExecInfo sortExecInfo; } STableMergeScanInfo; +typedef struct STagScanFilterContext { + SHashObj* colHash; + int32_t index; + SArray* cInfoList; +} STagScanFilterContext; + typedef struct STagScanInfo { SColumnInfo* pCols; SSDataBlock* pRes; @@ -263,6 +269,7 @@ typedef struct STagScanInfo { void* pCtbCursor; SNode* pTagCond; SNode* pTagIndexCond; + STagScanFilterContext filterCtx; SArray* aUidTags; // SArray SArray* aFilterIdxs; // SArray SStorageAPI* pStorageAPI; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 71352b1c6e..ef28875be4 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2719,12 +2719,6 @@ static int32_t tagScanCreateResultData(SDataType* pType, int32_t numOfRows, SSca return TSDB_CODE_SUCCESS; } -typedef struct STagScanFilterContext { - SHashObj* colHash; - int32_t index; - SArray* cInfoList; -} STagScanFilterContext; - static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) { SColumnNode* pSColumnNode = NULL; if (QUERY_NODE_COLUMN == nodeType((*pNode))) { @@ -2767,17 +2761,11 @@ static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) { } -static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, 
void* pVnode, SStorageAPI* pAPI) { +static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode, SStorageAPI* pAPI, STagScanInfo* pInfo) { int32_t code = 0; int32_t numOfTables = taosArrayGetSize(aUidTags); - STagScanFilterContext ctx = {0}; - ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); - ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); - - nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&ctx); - - SSDataBlock* pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, aUidTags, pVnode, pAPI); + SSDataBlock* pResBlock = createTagValBlockForFilter(pInfo->filterCtx.cInfoList, numOfTables, aUidTags, pVnode, pAPI); SArray* pBlockList = taosArrayInit(1, POINTER_BYTES); taosArrayPush(pBlockList, &pResBlock); @@ -2801,8 +2789,7 @@ static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aF blockDataDestroy(pResBlock); taosArrayDestroy(pBlockList); - taosHashCleanup(ctx.colHash); - taosArrayDestroy(ctx.cInfoList); + } static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo* pExprInfo, SColumnInfoData* pColInfo, int rowIndex, const SStorageAPI* pAPI, void* pVnode) { @@ -2911,7 +2898,7 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { bool ignoreFilterIdx = true; if (pInfo->pTagCond != NULL) { ignoreFilterIdx = false; - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI); + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI, pInfo); } else { ignoreFilterIdx = true; } @@ -2991,7 +2978,8 @@ static void destroyTagScanOperatorInfo(void* param) { if (pInfo->pCtbCursor != NULL) { pInfo->pStorageAPI->metaFn.closeCtbCursor(pInfo->pCtbCursor, 1); } - + taosHashCleanup(pInfo->filterCtx.colHash); + taosArrayDestroy(pInfo->filterCtx.cInfoList); taosArrayDestroy(pInfo->aFilterIdxs); 
taosArrayDestroyEx(pInfo->aUidTags, tagScanFreeUidTag); @@ -3001,8 +2989,9 @@ static void destroyTagScanOperatorInfo(void* param) { taosMemoryFreeClear(param); } -SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, +SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pTagScanNode, STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, SExecTaskInfo* pTaskInfo) { + SScanPhysiNode* pPhyNode = (STagScanPhysiNode*)pTagScanNode; STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -3040,11 +3029,16 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi initResultSizeInfo(&pOperator->resultInfo, 4096); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); - if (pPhyNode->onlyMetaCtbIdx) { + if (pTagScanNode->onlyMetaCtbIdx) { pInfo->aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); pInfo->aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + pInfo->filterCtx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); + pInfo->filterCtx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); + if (pInfo->pTagCond != NULL) { + nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&pInfo->filterCtx); + } } - __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? doTagScanFromCtbIdx : doTagScanFromMetaEntry; + __optr_fn_t tagScanNextFn = (pTagScanNode->onlyMetaCtbIdx) ? 
doTagScanFromCtbIdx : doTagScanFromMetaEntry; pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, tagScanNextFn, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 965af41fa7..d3cbaac5e1 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -564,14 +564,7 @@ static int32_t physiScanCopy(const SScanPhysiNode* pSrc, SScanPhysiNode* pDst) { } static int32_t physiTagScanCopy(const STagScanPhysiNode* pSrc, STagScanPhysiNode* pDst) { - COPY_BASE_OBJECT_FIELD(node, physiNodeCopy); - CLONE_NODE_LIST_FIELD(pScanCols); - CLONE_NODE_LIST_FIELD(pScanPseudoCols); - COPY_SCALAR_FIELD(uid); - COPY_SCALAR_FIELD(suid); - COPY_SCALAR_FIELD(tableType); - COPY_OBJECT_FIELD(tableName, sizeof(SName)); - COPY_SCALAR_FIELD(groupOrderScan); + COPY_BASE_OBJECT_FIELD(scan, physiScanCopy); COPY_SCALAR_FIELD(onlyMetaCtbIdx); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 4dfc55c0fa..64a4e0e7d3 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1630,28 +1630,8 @@ static const char* jkTagScanPhysiOnlyMetaCtbIdx = "OnlyMetaCtbIdx"; static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) { const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; - int32_t code = physicPlanNodeToJson(pObj, pJson); - if (TSDB_CODE_SUCCESS == code) { - code = nodeListToJson(pJson, jkScanPhysiPlanScanCols, pNode->pScanCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = nodeListToJson(pJson, jkScanPhysiPlanScanPseudoCols, pNode->pScanPseudoCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableId, pNode->uid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanSTableId, pNode->suid); - } - if (TSDB_CODE_SUCCESS == 
code) { - code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableType, pNode->tableType); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddObject(pJson, jkScanPhysiPlanTableName, nameToJson, &pNode->tableName); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddBoolToObject(pJson, jkScanPhysiPlanGroupOrderScan, pNode->groupOrderScan); - } + int32_t code = physiScanNodeToJson(pObj, pJson); + if (TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkTagScanPhysiOnlyMetaCtbIdx, pNode->onlyMetaCtbIdx); } @@ -1661,28 +1641,8 @@ static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) { static int32_t jsonToPhysiTagScanNode(const SJson* pJson, void* pObj) { STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; - int32_t code = jsonToPhysicPlanNode(pJson, pObj); - if (TSDB_CODE_SUCCESS == code) { - code = jsonToNodeList(pJson, jkScanPhysiPlanScanCols, &pNode->pScanCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = jsonToNodeList(pJson, jkScanPhysiPlanScanPseudoCols, &pNode->pScanPseudoCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanTableId, &pNode->uid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanSTableId, &pNode->suid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetTinyIntValue(pJson, jkScanPhysiPlanTableType, &pNode->tableType); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonToObject(pJson, jkScanPhysiPlanTableName, jsonToName, &pNode->tableName); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetBoolValue(pJson, jkScanPhysiPlanGroupOrderScan, &pNode->groupOrderScan); - } + int32_t code = jsonToPhysiScanNode(pObj, pJson); + if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkTagScanPhysiOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); } diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 4d1120861d..cade77fc17 100644 --- 
a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -2004,42 +2004,15 @@ static int32_t msgToPhysiScanNode(STlvDecoder* pDecoder, void* pObj) { } enum { - PHY_TAG_SCAN_CODE_BASE_NODE = 1, - PHY_TAG_SCAN_CODE_SCAN_COLS, - PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, - PHY_TAG_SCAN_CODE_BASE_UID, - PHY_TAG_SCAN_CODE_BASE_SUID, - PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, - PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, - PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, + PHY_TAG_SCAN_CODE_SCAN = 1, PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX }; static int32_t physiTagScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; - int32_t code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_BASE_NODE, physiNodeToMsg, &pNode->node); - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_COLS, nodeListToMsg, pNode->pScanCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, nodeListToMsg, pNode->pScanPseudoCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_UID, pNode->uid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_SUID, pNode->suid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeI8(pEncoder, PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, pNode->tableType); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, nameToMsg, &pNode->tableName); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, pNode->groupOrderScan); - } + int32_t code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN, physiScanNodeToMsg, &pNode->scan); + if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX, pNode->onlyMetaCtbIdx); } @@ -2053,29 +2026,8 @@ static int32_t msgToPhysiTagScanNode(STlvDecoder* pDecoder, 
void* pObj) { STlv* pTlv = NULL; tlvForEach(pDecoder, pTlv, code) { switch (pTlv->type) { - case PHY_TAG_SCAN_CODE_BASE_NODE: - code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node); - break; - case PHY_TAG_SCAN_CODE_SCAN_COLS: - code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanCols); - break; - case PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS: - code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanPseudoCols); - break; - case PHY_TAG_SCAN_CODE_BASE_UID: - code = tlvDecodeU64(pTlv, &pNode->uid); - break; - case PHY_TAG_SCAN_CODE_BASE_SUID: - code = tlvDecodeU64(pTlv, &pNode->suid); - break; - case PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE: - code = tlvDecodeI8(pTlv, &pNode->tableType); - break; - case PHY_TAG_SCAN_CODE_BASE_TABLE_NAME: - code = tlvDecodeObjFromTlv(pTlv, msgToName, &pNode->tableName); - break; - case PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN: - code = tlvDecodeBool(pTlv, &pNode->groupOrderScan); + case PHY_TAG_SCAN_CODE_SCAN: + code = tlvDecodeObjFromTlv(pTlv, msgToPhysiScanNode, &pNode->scan); break; case PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX: code = tlvDecodeBool(pTlv, &pNode->onlyMetaCtbIdx); From 1792bf306e9be53779425a59b60af7ef43242d96 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Aug 2023 16:54:26 +0800 Subject: [PATCH 76/78] vnode: fix cos cache evicting --- source/dnode/vnode/src/vnd/vnodeCos.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index 02021831bf..4c76538eb2 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -212,19 +212,20 @@ static int32_t evictFileCompareAsce(const void *pLeft, const void *pRight) { void s3EvictCache(const char *path, long object_size) { SDiskSize disk_size = {0}; - if (taosGetDiskSize((char *)path, &disk_size) < 0) { + char dir_name[TSDB_FILENAME_LEN] = "\0"; + + tstrncpy(dir_name, path, TSDB_FILENAME_LEN); + taosDirName(dir_name); + + if 
(taosGetDiskSize((char *)dir_name, &disk_size) < 0) { terrno = TAOS_SYSTEM_ERROR(errno); vError("failed to get disk:%s size since %s", path, terrstr()); return; } - if (object_size >= disk_size.avail + 1 << 30) { + if (object_size >= disk_size.avail - (1 << 30)) { // evict too old files // 1, list data files' atime under dir(path) - char dir_name[TSDB_FILENAME_LEN] = "\0"; - tstrncpy(dir_name, path, TSDB_FILENAME_LEN); - taosDirName(dir_name); - tdbDirPtr pDir = taosOpenDir(dir_name); if (pDir == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); @@ -236,9 +237,14 @@ void s3EvictCache(const char *path, long object_size) { char *name = taosGetDirEntryName(pDirEntry); if (!strncmp(name + strlen(name) - 5, ".data", 5)) { SEvictFile e_file = {0}; + char entry_name[TSDB_FILENAME_LEN] = "\0"; + int dir_len = strlen(dir_name); - tstrncpy(e_file.name, name, TSDB_FILENAME_LEN); - taosStatFile(name, &e_file.size, NULL, &e_file.atime); + memcpy(e_file.name, dir_name, dir_len); + e_file.name[dir_len] = '/'; + memcpy(e_file.name + dir_len + 1, name, strlen(name)); + + taosStatFile(e_file.name, &e_file.size, NULL, &e_file.atime); taosArrayPush(evict_files, &e_file); } From 450d7e2d3c6cdebf2dc6fbf2a279666e45efd576 Mon Sep 17 00:00:00 2001 From: slzhou Date: Tue, 15 Aug 2023 16:56:15 +0800 Subject: [PATCH 77/78] enhance: compilation error --- source/libs/nodes/src/nodesCodeFuncs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 64a4e0e7d3..48c9bf33dd 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1641,7 +1641,7 @@ static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) { static int32_t jsonToPhysiTagScanNode(const SJson* pJson, void* pObj) { STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; - int32_t code = jsonToPhysiScanNode(pObj, pJson); + int32_t code = jsonToPhysiScanNode(pJson, pObj); if (TSDB_CODE_SUCCESS == 
code) { code = tjsonGetBoolValue(pJson, jkTagScanPhysiOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); From 49e4b11547c03cfd7ed5dc993d2fa6e42bab9a8b Mon Sep 17 00:00:00 2001 From: slzhou Date: Tue, 15 Aug 2023 17:00:53 +0800 Subject: [PATCH 78/78] fix: fix compilation error --- source/libs/executor/src/scanoperator.c | 2 +- source/libs/nodes/test/nodesCloneTest.cpp | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ef28875be4..d7d97cc514 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2991,7 +2991,7 @@ static void destroyTagScanOperatorInfo(void* param) { SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pTagScanNode, STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, SExecTaskInfo* pTaskInfo) { - SScanPhysiNode* pPhyNode = (STagScanPhysiNode*)pTagScanNode; + SScanPhysiNode* pPhyNode = (SScanPhysiNode*)pTagScanNode; STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { diff --git a/source/libs/nodes/test/nodesCloneTest.cpp b/source/libs/nodes/test/nodesCloneTest.cpp index e1e99abab3..8b8893d317 100644 --- a/source/libs/nodes/test/nodesCloneTest.cpp +++ b/source/libs/nodes/test/nodesCloneTest.cpp @@ -199,9 +199,10 @@ TEST_F(NodesCloneTest, physiScan) { ASSERT_EQ(nodeType(pSrc), nodeType(pDst)); STagScanPhysiNode* pSrcNode = (STagScanPhysiNode*)pSrc; STagScanPhysiNode* pDstNode = (STagScanPhysiNode*)pDst; - ASSERT_EQ(pSrcNode->uid, pDstNode->uid); - ASSERT_EQ(pSrcNode->suid, pDstNode->suid); - ASSERT_EQ(pSrcNode->tableType, pDstNode->tableType); + ASSERT_EQ(pSrcNode->scan.uid, pDstNode->scan.uid); + ASSERT_EQ(pSrcNode->scan.suid, pDstNode->scan.suid); + ASSERT_EQ(pSrcNode->scan.tableType, pDstNode->scan.tableType); + 
ASSERT_EQ(pSrcNode->onlyMetaCtbIdx, pDstNode->onlyMetaCtbIdx); }); std::unique_ptr srcNode(nullptr, nodesDestroyNode);