From 88d755be76d3b75bcebf9b114363ce0b5b4dcae1 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 4 Aug 2023 13:31:00 +0800 Subject: [PATCH 01/81] feat(tsdb/cos): s3 migration --- cmake/cmake.options | 6 + cmake/cos_CMakeLists.txt.in | 12 + contrib/CMakeLists.txt | 23 + contrib/test/CMakeLists.txt | 5 + contrib/test/cos/CMakeLists.txt | 49 + contrib/test/cos/main.c | 3090 +++++++++++++++++++ source/common/src/tglobal.c | 68 +- source/dnode/vnode/CMakeLists.txt | 28 +- source/dnode/vnode/src/inc/vndCos.h | 36 + source/dnode/vnode/src/tsdb/tsdbRetention.c | 120 +- source/dnode/vnode/src/vnd/vnodeCos.c | 114 + source/dnode/vnode/src/vnd/vnodeModule.c | 5 + 12 files changed, 3530 insertions(+), 26 deletions(-) create mode 100644 cmake/cos_CMakeLists.txt.in create mode 100644 contrib/test/cos/CMakeLists.txt create mode 100644 contrib/test/cos/main.c create mode 100644 source/dnode/vnode/src/inc/vndCos.h create mode 100644 source/dnode/vnode/src/vnd/vnodeCos.c diff --git a/cmake/cmake.options b/cmake/cmake.options index fa0b888415..ea5efcb13a 100644 --- a/cmake/cmake.options +++ b/cmake/cmake.options @@ -125,6 +125,12 @@ option( ON ) +option( + BUILD_WITH_COS + "If build with cos" + ON +) + option( BUILD_WITH_SQLITE "If build with sqlite" diff --git a/cmake/cos_CMakeLists.txt.in b/cmake/cos_CMakeLists.txt.in new file mode 100644 index 0000000000..ee1e58b50f --- /dev/null +++ b/cmake/cos_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# cos +ExternalProject_Add(cos + GIT_REPOSITORY https://github.com/tencentyun/cos-c-sdk-v5.git + GIT_TAG v5.0.16 + SOURCE_DIR "${TD_CONTRIB_DIR}/cos-c-sdk-v5" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index c60fd33b16..df9519d00f 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -122,6 +122,12 @@ if(${BUILD_WITH_SQLITE}) cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif(${BUILD_WITH_SQLITE}) +# cos +if(${BUILD_WITH_COS}) + cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + add_definitions(-DUSE_COS) +endif(${BUILD_WITH_COS}) + # lucene if(${BUILD_WITH_LUCENE}) cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -347,6 +353,23 @@ if (${BUILD_WITH_ROCKSDB}) endif() endif() +# cos +if(${BUILD_WITH_COS}) + option(ENABLE_TEST "Enable the tests" OFF) + + set(CMAKE_BUILD_TYPE debug) + set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) + set(CMAKE_PROJECT_NAME cos_c_sdk) + + add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL) + target_include_directories( + cos_c_sdk + PUBLIC $ + ) + + set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME}) +endif(${BUILD_WITH_COS}) + # lucene # To support build on ubuntu: sudo apt-get install libboost-all-dev if(${BUILD_WITH_LUCENE}) diff --git a/contrib/test/CMakeLists.txt b/contrib/test/CMakeLists.txt index f35cf0d13d..1deff5a67e 100644 --- a/contrib/test/CMakeLists.txt +++ b/contrib/test/CMakeLists.txt @@ -3,6 +3,11 @@ if(${BUILD_WITH_ROCKSDB}) add_subdirectory(rocksdb) endif(${BUILD_WITH_ROCKSDB}) +# cos +if(${BUILD_WITH_COS}) + add_subdirectory(cos) +endif(${BUILD_WITH_COS}) + if(${BUILD_WITH_LUCENE}) add_subdirectory(lucene) endif(${BUILD_WITH_LUCENE}) diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt new file mode 100644 index 0000000000..77c57e5a65 --- /dev/null +++ b/contrib/test/cos/CMakeLists.txt @@ -0,0 +1,49 @@ +add_executable(cosTest "") +target_sources(cosTest + PRIVATE + 
"${CMAKE_CURRENT_SOURCE_DIR}/main.c" + ) + +#find_path(APR_INCLUDE_DIR apr-1/apr_time.h) +#find_path(APR_UTIL_INCLUDE_DIR apr/include/apr-1/apr_md5.h) +#find_path(MINIXML_INCLUDE_DIR mxml.h) +#find_path(CURL_INCLUDE_DIR curl/curl.h) + +#include_directories (${MINIXML_INCLUDE_DIR}) +#include_directories (${CURL_INCLUDE_DIR}) +FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) +#FIND_PROGRAM(APU_CONFIG_BIN NAMES apu-config apu-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) + +IF (APR_CONFIG_BIN) + EXECUTE_PROCESS( + COMMAND ${APR_CONFIG_BIN} --includedir + OUTPUT_VARIABLE APR_INCLUDE_DIR + OUTPUT_STRIP_TRAILING_WHITESPACE + ) +ENDIF() +#IF (APU_CONFIG_BIN) +# EXECUTE_PROCESS( +# COMMAND ${APU_CONFIG_BIN} --includedir +# OUTPUT_VARIABLE APR_UTIL_INCLUDE_DIR +# OUTPUT_STRIP_TRAILING_WHITESPACE +# ) +#ENDIF() + +include_directories (${APR_INCLUDE_DIR}) +#include_directories (${APR_UTIL_INCLUDE_DIR}) + +target_include_directories( + cosTest + PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" + ) + +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) + +target_link_libraries(cosTest cos_c_sdk) +target_link_libraries(cosTest ${APR_UTIL_LIBRARY}) +target_link_libraries(cosTest ${APR_LIBRARY}) +target_link_libraries(cosTest ${MINIXML_LIBRARY}) +target_link_libraries(cosTest ${CURL_LIBRARY}) diff --git a/contrib/test/cos/main.c b/contrib/test/cos/main.c new file mode 100644 index 0000000000..faaceee2e3 --- /dev/null +++ b/contrib/test/cos/main.c @@ -0,0 +1,3090 @@ +#include +#include +#include +#include + +#include "cos_api.h" +#include "cos_http_io.h" +#include "cos_log.h" + +// endpoint 是 COS 访问域名信息,详情请参见 https://cloud.tencent.com/document/product/436/6224 文档 +// static char TEST_COS_ENDPOINT[] = "cos.ap-guangzhou.myqcloud.com"; +// static char TEST_COS_ENDPOINT[] = "http://oss-cn-beijing.aliyuncs.com"; +static char TEST_COS_ENDPOINT[] = "http://cos.ap-beijing.myqcloud.com"; +// 数据万象的访问域名,详情请参见 https://cloud.tencent.com/document/product/460/31066 文档 +static char TEST_CI_ENDPOINT[] = "https://ci.ap-guangzhou.myqcloud.com"; +// 开发者拥有的项目身份ID/密钥,可在 https://console.cloud.tencent.com/cam/capi 页面获取 +static char *TEST_ACCESS_KEY_ID; // your secret_id +static char *TEST_ACCESS_KEY_SECRET; // your secret_key +// 开发者访问 COS 服务时拥有的用户维度唯一资源标识,用以标识资源,可在 https://console.cloud.tencent.com/cam/capi +// 页面获取 +// static char TEST_APPID[] = ""; // your appid +// static char TEST_APPID[] = "119"; // your appid +static char TEST_APPID[] = "1309024725"; // your appid +// the cos bucket name, syntax: [bucket]-[appid], for example: mybucket-1253666666,可在 +// https://console.cloud.tencent.com/cos5/bucket 查看 static char TEST_BUCKET_NAME[] = ""; +// static char TEST_BUCKET_NAME[] = ""; +// static char TEST_BUCKET_NAME[] = "test-bucket-119"; +static char TEST_BUCKET_NAME[] = "test0711-1309024725"; +// 对象拥有者,比如用户UIN:100000000001 +static char TEST_UIN[] = ""; // your uin +// 地域信息,枚举值可参见 https://cloud.tencent.com/document/product/436/6224 +// 文档,例如:ap-beijing、ap-hongkong、eu-frankfurt 等 +static char TEST_REGION[] = "ap-guangzhou"; // region in endpoint +// 对象键,对象(Object)在存储桶(Bucket)中的唯一标识。有关对象与对象键的进一步说明,请参见 +// https://cloud.tencent.com/document/product/436/13324 文档 +static char TEST_OBJECT_NAME1[] = "1.txt"; +static char TEST_OBJECT_NAME2[] = "test2.dat"; +static char TEST_OBJECT_NAME3[] = "test3.dat"; +static 
char TEST_OBJECT_NAME4[] = "multipart.txt"; +// static char TEST_DOWNLOAD_NAME2[] = "download_test2.dat"; +static char *TEST_APPEND_NAMES[] = {"test.7z.001", "test.7z.002"}; +static char TEST_DOWNLOAD_NAME3[] = "download_test3.dat"; +static char TEST_MULTIPART_OBJECT[] = "multipart.dat"; +static char TEST_DOWNLOAD_NAME4[] = "multipart_download.dat"; +static char TEST_MULTIPART_FILE[] = "test.zip"; +// static char TEST_MULTIPART_OBJECT2[] = "multipart2.dat"; +static char TEST_MULTIPART_OBJECT3[] = "multipart3.dat"; +static char TEST_MULTIPART_OBJECT4[] = "multipart4.dat"; + +static void print_headers(cos_table_t *headers) { + const cos_array_header_t *tarr; + const cos_table_entry_t *telts; + int i = 0; + + if (apr_is_empty_table(headers)) { + return; + } + + tarr = cos_table_elts(headers); + telts = (cos_table_entry_t *)tarr->elts; + + printf("headers:\n"); + for (; i < tarr->nelts; i++) { + telts = (cos_table_entry_t *)(tarr->elts + i * tarr->elt_size); + printf("%s: %s\n", telts->key, telts->val); + } +} + +void init_test_config(cos_config_t *config, int is_cname) { + cos_str_set(&config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&config->appid, TEST_APPID); + config->is_cname = is_cname; +} + +void init_test_request_options(cos_request_options_t *options, int is_cname) { + options->config = cos_config_create(options->pool); + init_test_config(options->config, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); +} + +void log_status(cos_status_t *s) { + cos_warn_log("status->code: %d", s->code); + if (s->error_code) cos_warn_log("status->error_code: %s", s->error_code); + if (s->error_msg) cos_warn_log("status->error_msg: %s", s->error_msg); + if (s->req_id) cos_warn_log("status->req_id: %s", s->req_id); +} + +void test_sign() { + cos_pool_t *p = NULL; + const unsigned char secret_key[] = "your secret_key"; + const unsigned char time_str[] = "1480932292;1481012292"; + unsigned char sign_key[40]; + cos_buf_t *fmt_str; + const char *value = NULL; + const char *uri = "/testfile"; + const char *host = "testbucket-125000000.cn-north.myqcloud.com&range=bytes%3d0-3"; + unsigned char fmt_str_hex[40]; + + cos_pool_create(&p, NULL); + fmt_str = cos_create_buf(p, 1024); + + cos_get_hmac_sha1_hexdigest(sign_key, secret_key, sizeof(secret_key) - 1, time_str, sizeof(time_str) - 1); + char *pstr = apr_pstrndup(p, (char *)sign_key, sizeof(sign_key)); + cos_warn_log("sign_key: %s", pstr); + + // method + value = "get"; + cos_buf_append_string(p, fmt_str, value, strlen(value)); + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + // canonicalized resource(URI) + cos_buf_append_string(p, fmt_str, uri, strlen(uri)); + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + // query-parameters + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + // Host + cos_buf_append_string(p, fmt_str, "host=", sizeof("host=") - 1); + cos_buf_append_string(p, fmt_str, host, strlen(host)); + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + char *pstr3 = apr_pstrndup(p, (char *)fmt_str->pos, cos_buf_size(fmt_str)); + cos_warn_log("Format string: %s", pstr3); + + // Format-String sha1hash + cos_get_sha1_hexdigest(fmt_str_hex, (unsigned char *)fmt_str->pos, cos_buf_size(fmt_str)); + + char *pstr2 = apr_pstrndup(p, (char *)fmt_str_hex, sizeof(fmt_str_hex)); + cos_warn_log("Format string sha1hash: %s", pstr2); + + cos_pool_destroy(p); 
+} + +void test_bucket() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_acl_e cos_acl = COS_ACL_PRIVATE; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // create test bucket + s = cos_create_bucket(options, &bucket, cos_acl, &resp_headers); + log_status(s); + + // list object (get bucket) + cos_list_object_params_t *list_params = NULL; + list_params = cos_create_list_object_params(p); + cos_str_set(&list_params->encoding_type, "url"); + s = cos_list_object(options, &bucket, list_params, &resp_headers); + log_status(s); + cos_list_object_content_t *content = NULL; + char *line = NULL; + cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) { + line = apr_psprintf(p, "%.*s\t%.*s\t%.*s\n", content->key.len, content->key.data, content->size.len, + content->size.data, content->last_modified.len, content->last_modified.data); + printf("%s", line); + printf("next marker: %s\n", list_params->next_marker.data); + } + cos_list_object_common_prefix_t *common_prefix = NULL; + cos_list_for_each_entry(cos_list_object_common_prefix_t, common_prefix, &list_params->common_prefix_list, node) { + printf("common prefix: %s\n", common_prefix->prefix.data); + } + + // delete bucket + s = cos_delete_bucket(options, &bucket, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_list_objects() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //获取对象列表 + cos_list_object_params_t *list_params = NULL; + cos_list_object_content_t *content = NULL; + list_params = cos_create_list_object_params(p); + s = cos_list_object(options, &bucket, list_params, &resp_headers); + if (cos_status_is_ok(s)) { + printf("list object succeeded\n"); + cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) { + printf("object: %.*s\n", content->key.len, content->key.data); + } + } else { + printf("list object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_bucket_lifecycle() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + cos_list_t rule_list; + cos_list_init(&rule_list); + cos_lifecycle_rule_content_t *rule_content = NULL; + + rule_content = cos_create_lifecycle_rule_content(p); + cos_str_set(&rule_content->id, "testrule1"); + cos_str_set(&rule_content->prefix, "abc/"); + cos_str_set(&rule_content->status, "Enabled"); + rule_content->expire.days = 365; + cos_list_add_tail(&rule_content->node, &rule_list); + + rule_content = cos_create_lifecycle_rule_content(p); + cos_str_set(&rule_content->id, "testrule2"); + cos_str_set(&rule_content->prefix, "efg/"); + cos_str_set(&rule_content->status, "Disabled"); + cos_str_set(&rule_content->transition.storage_class, "Standard_IA"); + 
rule_content->transition.days = 999; + cos_list_add_tail(&rule_content->node, &rule_list); +
+ rule_content = cos_create_lifecycle_rule_content(p); + cos_str_set(&rule_content->id, "testrule3"); + cos_str_set(&rule_content->prefix, "xxx/"); + cos_str_set(&rule_content->status, "Enabled"); + rule_content->abort.days = 1; + cos_list_add_tail(&rule_content->node, &rule_list); +
+ s = cos_put_bucket_lifecycle(options, &bucket, &rule_list, &resp_headers); + log_status(s); +
+ cos_list_t rule_list_ret; + cos_list_init(&rule_list_ret); + s = cos_get_bucket_lifecycle(options, &bucket, &rule_list_ret, &resp_headers); + log_status(s); +
+ cos_delete_bucket_lifecycle(options, &bucket, &resp_headers); + log_status(s); +
+ cos_pool_destroy(p); +} +
+void test_put_object_with_limit() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers = NULL; + cos_table_t *headers = NULL; +
+ // Create a memory pool + cos_pool_create(&p, NULL); +
+ // Initialize request options + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); +
+ // The traffic-limit value must be within 819200 - 838860800 (100KB/s - 100MB/s); values outside this range return a 400 error + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); +
+ // Upload the object + cos_str_set(&file, "test_file.bin"); + cos_str_set(&object, TEST_OBJECT_NAME1); + s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers); + if (cos_status_is_ok(s)) { + printf("put object succeeded\n"); + } else { + printf("put object failed\n"); + } +
+ // Destroy the memory pool + cos_pool_destroy(p); +} +
+void test_get_object_with_limit() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers = NULL; + cos_table_t *headers = NULL; +
+ // Create a memory pool + cos_pool_create(&p, NULL); +
+ // Initialize request options + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); +
+ // The traffic-limit value must be within 819200 - 838860800 (100KB/s - 100MB/s); values outside this range return a 400 error + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); +
+ // Download the object + cos_str_set(&file, "test_file.bin"); + cos_str_set(&object, TEST_OBJECT_NAME1); + s = cos_get_object_to_file(options, &bucket, &object, headers, NULL, &file, &resp_headers); + if (cos_status_is_ok(s)) { + printf("get object succeeded\n"); + } else { + printf("get object failed\n"); + } +
+ // Destroy the memory pool + cos_pool_destroy(p); +} +
+void test_gen_object_url() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; +
+ cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); +
+ printf("url:%s\n", cos_gen_object_url(options, &bucket, &object)); +
+ cos_pool_destroy(p); +} +
+void test_create_dir() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_list_t buffer; +
+ cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); +
cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "folder/"); + + //上传文件夹 + cos_list_init(&buffer); + s = cos_put_object_from_buffer(options, &bucket, &object, &buffer, headers, &resp_headers); + if (cos_status_is_ok(s)) { + printf("put object succeeded\n"); + } else { + printf("put object failed\n"); + } + cos_pool_destroy(p); +} + +void test_object() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_list_t buffer; + cos_buf_t *content = NULL; + char *str = "This is my test data."; + cos_string_t file; + int traffic_limit = 0; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + cos_list_init(&buffer); + content = cos_buf_pack(options->pool, str, strlen(str)); + cos_list_add_tail(&content->node, &buffer); + s = cos_put_object_from_buffer(options, &bucket, &object, &buffer, headers, &resp_headers); + log_status(s); + + cos_list_t download_buffer; + cos_list_init(&download_buffer); + if (traffic_limit) { + // 限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + } + s = cos_get_object_to_buffer(options, &bucket, &object, headers, NULL, &download_buffer, &resp_headers); + log_status(s); + print_headers(resp_headers); + int64_t len = 0; + int64_t size = 0; + int64_t pos = 0; + cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { len += cos_buf_size(content); } + char *buf = cos_pcalloc(p, (apr_size_t)(len + 1)); + buf[len] = '\0'; + cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { + size = cos_buf_size(content); + memcpy(buf + pos, content->pos, (size_t)size); + pos += size; + } + cos_warn_log("Download data=%s", buf); + + cos_str_set(&file, TEST_OBJECT_NAME4); + cos_str_set(&object, TEST_OBJECT_NAME4); + s = cos_put_object_from_file(options, &bucket, &object, &file, NULL, &resp_headers); + log_status(s); + + cos_str_set(&file, TEST_DOWNLOAD_NAME3); + cos_str_set(&object, TEST_OBJECT_NAME3); + s = cos_get_object_to_file(options, &bucket, &object, NULL, NULL, &file, &resp_headers); + log_status(s); + + cos_str_set(&object, TEST_OBJECT_NAME2); + s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); + log_status(s); + + cos_str_set(&object, TEST_OBJECT_NAME1); + s = cos_delete_object(options, &bucket, &object, &resp_headers); + log_status(s); + + cos_str_set(&object, TEST_OBJECT_NAME3); + s = cos_delete_object(options, &bucket, &object, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_append_object() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //追加上传对象 + cos_str_set(&object, TEST_OBJECT_NAME3); + int32_t count = sizeof(TEST_APPEND_NAMES) / sizeof(char *); + int32_t index = 0; + int64_t position = 0; + s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); + if (s->code == 200) { + char *content_length_str 
= (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH); + if (content_length_str != NULL) { + position = atol(content_length_str); + } + } + for (; index < count; index++) { + cos_str_set(&file, TEST_APPEND_NAMES[index]); + s = cos_append_object_from_file(options, &bucket, &object, position, &file, NULL, &resp_headers); + log_status(s); + + s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); + if (s->code == 200) { + char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH); + if (content_length_str != NULL) { + position = atol(content_length_str); + } + } + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_head_object() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //获取对象元数据 + cos_str_set(&object, TEST_OBJECT_NAME1); + s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); + print_headers(resp_headers); + if (cos_status_is_ok(s)) { + printf("head object succeeded\n"); + } else { + printf("head object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_check_object_exist() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_object_exist_status_e object_exist; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + // 检查对象是否存在 + s = cos_check_object_exist(options, &bucket, &object, headers, &object_exist, &resp_headers); + if (object_exist == COS_OBJECT_NON_EXIST) { + printf("object: %.*s non exist.\n", object.len, object.data); + } else if (object_exist == COS_OBJECT_EXIST) { + printf("object: %.*s exist.\n", object.len, object.data); + } else { + printf("object: %.*s unknown status.\n", object.len, object.data); + log_status(s); + } + + cos_pool_destroy(p); +} + +void test_object_restore() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_status_t *s = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test_restore.dat"); + + cos_object_restore_params_t *restore_params = cos_create_object_restore_params(p); + restore_params->days = 30; + cos_str_set(&restore_params->tier, "Standard"); + s = cos_post_object_restore(options, &bucket, &object, restore_params, NULL, NULL, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void progress_callback(int64_t consumed_bytes, int64_t total_bytes) { + printf("consumed_bytes = %" APR_INT64_T_FMT ", total_bytes = %" APR_INT64_T_FMT "\n", consumed_bytes, total_bytes); +} + +void test_put_object_from_file() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_string_t file; + int traffic_limit = 0; + + 
cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_table_t *headers = NULL; + if (traffic_limit) { + // 限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + } + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&file, TEST_OBJECT_NAME4); + cos_str_set(&object, TEST_OBJECT_NAME4); + s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_put_object_from_file_with_sse() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_string_t file; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_table_t *headers = NULL; + headers = cos_table_make(p, 3); + // apr_table_add(headers, "x-cos-server-side-encryption", "AES256"); + apr_table_add(headers, "x-cos-server-side-encryption-customer-algorithm", "AES256"); + apr_table_add(headers, "x-cos-server-side-encryption-customer-key", "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY="); + apr_table_add(headers, "x-cos-server-side-encryption-customer-key-MD5", "U5L61r7jcwdNvT7frmUG8g=="); + + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&file, "/home/jojoliang/data/test.jpg"); + cos_str_set(&object, "pic"); + + s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers); + log_status(s); + { + int i = 0; + apr_array_header_t *pp = (apr_array_header_t *)apr_table_elts(resp_headers); + for (; i < pp->nelts; i++) { + apr_table_entry_t *ele = (apr_table_entry_t *)pp->elts + i; + printf("%s: %s\n", ele->key, ele->val); + } + } + + cos_pool_destroy(p); +} + +void test_get_object_to_file_with_sse() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_string_t file; + cos_table_t *headers = NULL; + cos_table_t *params = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 3); + /* + apr_table_add(headers, "x-cos-server-side-encryption", "AES256"); + */ + /* + apr_table_add(headers, "x-cos-server-side-encryption-customer-algorithm", "AES256"); + apr_table_add(headers, "x-cos-server-side-encryption-customer-key", + "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY="); apr_table_add(headers, + "x-cos-server-side-encryption-customer-key-MD5", "U5L61r7jcwdNvT7frmUG8g=="); + */ + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&file, "getfile"); + cos_str_set(&object, TEST_OBJECT_NAME1); + + s = cos_get_object_to_file(options, &bucket, &object, headers, params, &file, &resp_headers); + log_status(s); + + { + int i = 0; + apr_array_header_t *pp = (apr_array_header_t *)apr_table_elts(resp_headers); + for (; i < pp->nelts; i++) { + apr_table_entry_t *ele = (apr_table_entry_t *)pp->elts + i; + printf("%s: %s\n", ele->key, ele->val); + } + } + + cos_pool_destroy(p); +} + +void multipart_upload_file_from_file() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *complete_headers = NULL; + cos_table_t *resp_headers = NULL; + 
cos_request_options_t *options = NULL; + cos_string_t upload_id; + cos_upload_file_t *upload_file = NULL; + cos_status_t *s = NULL; + cos_list_upload_part_params_t *params = NULL; + cos_list_t complete_part_list; + cos_list_part_content_t *part_content = NULL; + cos_complete_part_content_t *complete_part_content = NULL; + int part_num = 1; + int64_t pos = 0; + int64_t file_length = 0; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 1); + complete_headers = cos_table_make(p, 1); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT); + + // init mulitipart + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, headers, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Init multipart upload succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Init multipart upload failed\n"); + cos_pool_destroy(p); + return; + } + + // upload part from file + int res = COSE_OK; + cos_file_buf_t *fb = cos_create_file_buf(p); + res = cos_open_file_for_all_read(p, TEST_MULTIPART_FILE, fb); + if (res != COSE_OK) { + cos_error_log("Open read file fail, filename:%s\n", TEST_MULTIPART_FILE); + return; + } + file_length = fb->file_last; + apr_file_close(fb->file); + while (pos < file_length) { + upload_file = cos_create_upload_file(p); + cos_str_set(&upload_file->filename, TEST_MULTIPART_FILE); + upload_file->file_pos = pos; + pos += 2 * 1024 * 1024; + upload_file->file_last = pos < file_length ? pos : file_length; // 2MB + s = cos_upload_part_from_file(options, &bucket, &object, &upload_id, part_num++, upload_file, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Multipart upload part from file succeeded\n"); + } else { + printf("Multipart upload part from file failed\n"); + } + } + + // list part + params = cos_create_list_upload_part_params(p); + params->max_ret = 1000; + cos_list_init(&complete_part_list); + s = cos_list_upload_part(options, &bucket, &object, &upload_id, params, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("List multipart succeeded\n"); + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n", part_content->part_number.data, + part_content->size.data, part_content->last_modified.data, part_content->etag.data); + } + } else { + printf("List multipart failed\n"); + cos_pool_destroy(p); + return; + } + + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + complete_part_content = cos_create_complete_part_content(p); + cos_str_set(&complete_part_content->part_number, part_content->part_number.data); + cos_str_set(&complete_part_content->etag, part_content->etag.data); + cos_list_add_tail(&complete_part_content->node, &complete_part_list); + } + + // complete multipart + s = cos_complete_multipart_upload(options, &bucket, &object, &upload_id, &complete_part_list, complete_headers, + &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Complete multipart upload from file succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Complete multipart upload from file failed\n"); + } + + cos_pool_destroy(p); +} + +void multipart_upload_file_from_buffer() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *complete_headers = NULL; + cos_table_t 
*resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_string_t upload_id; + cos_status_t *s = NULL; + cos_list_t complete_part_list; + cos_complete_part_content_t *complete_part_content = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 1); + complete_headers = cos_table_make(p, 1); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT); + + // init mulitipart + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, headers, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Init multipart upload succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Init multipart upload failed\n"); + cos_pool_destroy(p); + return; + } + + // upload part from buffer + char *str = "This is my test data...."; + cos_list_t buffer; + cos_buf_t *content; + + // 上传一个分块 + cos_list_init(&buffer); + content = cos_buf_pack(p, str, strlen(str)); + cos_list_add_tail(&content->node, &buffer); + s = cos_upload_part_from_buffer(options, &bucket, &object, &upload_id, 1, &buffer, &resp_headers); + + // 直接获取etag + char *etag = apr_pstrdup(p, (char *)apr_table_get(resp_headers, "ETag")); + cos_list_init(&complete_part_list); + complete_part_content = cos_create_complete_part_content(p); + cos_str_set(&complete_part_content->part_number, "1"); + cos_str_set(&complete_part_content->etag, etag); + cos_list_add_tail(&complete_part_content->node, &complete_part_list); + + // 也可以通过 list part 获取取etag + /* + //list part + params = cos_create_list_upload_part_params(p); + params->max_ret = 1000; + cos_list_init(&complete_part_list); + s = cos_list_upload_part(options, &bucket, &object, &upload_id, + params, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("List multipart succeeded\n"); + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n", + part_content->part_number.data, + part_content->size.data, + part_content->last_modified.data, + part_content->etag.data); + } + } else { + printf("List multipart failed\n"); + cos_pool_destroy(p); + return; + } + + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + complete_part_content = cos_create_complete_part_content(p); + cos_str_set(&complete_part_content->part_number, part_content->part_number.data); + cos_str_set(&complete_part_content->etag, part_content->etag.data); + cos_list_add_tail(&complete_part_content->node, &complete_part_list); + } + */ + + // complete multipart + s = cos_complete_multipart_upload(options, &bucket, &object, &upload_id, &complete_part_list, complete_headers, + &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Complete multipart upload from file succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Complete multipart upload from file failed\n"); + } + + cos_pool_destroy(p); +} + +void abort_multipart_upload() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_string_t upload_id; + cos_status_t *s = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 1); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, 
TEST_MULTIPART_OBJECT); + + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, headers, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Init multipart upload succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Init multipart upload failed\n"); + cos_pool_destroy(p); + return; + } + + s = cos_abort_multipart_upload(options, &bucket, &object, &upload_id, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Abort multipart upload succeeded, upload_id::%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Abort multipart upload failed\n"); + } + + cos_pool_destroy(p); +} + +void list_multipart() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_status_t *s = NULL; + cos_list_multipart_upload_params_t *list_multipart_params = NULL; + cos_list_upload_part_params_t *list_upload_param = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + list_multipart_params = cos_create_list_multipart_upload_params(p); + list_multipart_params->max_ret = 999; + s = cos_list_multipart_upload(options, &bucket, list_multipart_params, &resp_headers); + log_status(s); + + list_upload_param = cos_create_list_upload_part_params(p); + list_upload_param->max_ret = 1000; + cos_string_t upload_id; + cos_str_set(&upload_id, "149373379126aee264fecbf5fe8ddb8b9cd23b76c73ab1af0bcfd50683cc4254f81ebe2386"); + cos_str_set(&object, TEST_MULTIPART_OBJECT); + s = cos_list_upload_part(options, &bucket, &object, &upload_id, list_upload_param, &resp_headers); + if (cos_status_is_ok(s)) { + printf("List upload part succeeded, upload_id::%.*s\n", upload_id.len, upload_id.data); + cos_list_part_content_t *part_content = NULL; + cos_list_for_each_entry(cos_list_part_content_t, part_content, &list_upload_param->part_list, node) { + printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n", part_content->part_number.data, + part_content->size.data, part_content->last_modified.data, part_content->etag.data); + } + } else { + printf("List upload part failed\n"); + } + + cos_pool_destroy(p); +} + +void test_resumable() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t filepath; + cos_resumable_clt_params_t *clt_params; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT4); + cos_str_set(&filepath, TEST_DOWNLOAD_NAME4); + + clt_params = cos_create_resumable_clt_params_content(p, 5 * 1024 * 1024, 3, COS_FALSE, NULL); + s = cos_resumable_download_file(options, &bucket, &object, &filepath, NULL, NULL, clt_params, NULL); + log_status(s); + + cos_pool_destroy(p); +} + +void test_resumable_upload_with_multi_threads() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t filename; + cos_status_t *s = NULL; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_resumable_clt_params_t *clt_params; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 
0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT4); + cos_str_set(&filename, TEST_MULTIPART_FILE); + + // upload + clt_params = cos_create_resumable_clt_params_content(p, 1024 * 1024, 8, COS_FALSE, NULL); + s = cos_resumable_upload_file(options, &bucket, &object, &filename, headers, NULL, clt_params, NULL, &resp_headers, + NULL); + + if (cos_status_is_ok(s)) { + printf("upload succeeded\n"); + } else { + printf("upload failed\n"); + } + + cos_pool_destroy(p); +} + +void test_delete_objects() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_string_t bucket; + cos_status_t *s = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + char *object_name1 = TEST_OBJECT_NAME2; + char *object_name2 = TEST_OBJECT_NAME3; + cos_object_key_t *content1 = NULL; + cos_object_key_t *content2 = NULL; + cos_list_t object_list; + cos_list_t deleted_object_list; + int is_quiet = COS_TRUE; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + cos_list_init(&object_list); + cos_list_init(&deleted_object_list); + content1 = cos_create_cos_object_key(p); + cos_str_set(&content1->key, object_name1); + cos_list_add_tail(&content1->node, &object_list); + content2 = cos_create_cos_object_key(p); + cos_str_set(&content2->key, object_name2); + cos_list_add_tail(&content2->node, &object_list); + + s = cos_delete_objects(options, &bucket, &object_list, is_quiet, &resp_headers, &deleted_object_list); + log_status(s); + + cos_pool_destroy(p); + + if (cos_status_is_ok(s)) { + printf("delete objects succeeded\n"); + } else { + printf("delete objects failed\n"); + } +} + +void test_delete_objects_by_prefix() { + cos_pool_t *p = NULL; + cos_request_options_t *options = NULL; + int is_cname = 0; + cos_string_t bucket; + cos_status_t *s = NULL; + cos_string_t prefix; + char *prefix_str = ""; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&prefix, prefix_str); + + s = cos_delete_objects_by_prefix(options, &bucket, &prefix); + log_status(s); + cos_pool_destroy(p); + + printf("test_delete_object_by_prefix ok\n"); +} + +void test_acl() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_acl_e cos_acl = COS_ACL_PRIVATE; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.txt"); + + // put acl + cos_string_t read; + cos_str_set(&read, "id=\"qcs::cam::uin/12345:uin/12345\", id=\"qcs::cam::uin/45678:uin/45678\""); + s = cos_put_bucket_acl(options, &bucket, cos_acl, &read, NULL, NULL, &resp_headers); + log_status(s); + + // get acl + cos_acl_params_t *acl_params = NULL; + acl_params = cos_create_acl_params(p); + s = cos_get_bucket_acl(options, &bucket, acl_params, &resp_headers); + log_status(s); + printf("acl owner id:%s, name:%s\n", acl_params->owner_id.data, acl_params->owner_name.data); + cos_acl_grantee_content_t *acl_content = NULL; + cos_list_for_each_entry(cos_acl_grantee_content_t, acl_content, &acl_params->grantee_list, node) { + printf("acl grantee type:%s, id:%s, name:%s, permission:%s\n", acl_content->type.data, 
acl_content->id.data, + acl_content->name.data, acl_content->permission.data); + } +
+ // put acl + s = cos_put_object_acl(options, &bucket, &object, cos_acl, &read, NULL, NULL, &resp_headers); + log_status(s); +
+ // get acl + cos_acl_params_t *acl_params2 = NULL; + acl_params2 = cos_create_acl_params(p); + s = cos_get_object_acl(options, &bucket, &object, acl_params2, &resp_headers); + log_status(s); + printf("acl owner id:%s, name:%s\n", acl_params2->owner_id.data, acl_params2->owner_name.data); + acl_content = NULL; + cos_list_for_each_entry(cos_acl_grantee_content_t, acl_content, &acl_params2->grantee_list, node) { + printf("acl grantee id:%s, name:%s, permission:%s\n", acl_content->id.data, acl_content->name.data, + acl_content->permission.data); + } +
+ cos_pool_destroy(p); +} +
+void test_copy() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t src_bucket; + cos_string_t src_object; + cos_string_t src_endpoint; + cos_table_t *resp_headers = NULL; +
+ // Create a memory pool + cos_pool_create(&p, NULL); +
+ // Initialize request options + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); +
+ // Set up the object copy + cos_str_set(&object, TEST_OBJECT_NAME2); + cos_str_set(&src_bucket, TEST_BUCKET_NAME); + cos_str_set(&src_endpoint, TEST_COS_ENDPOINT); + cos_str_set(&src_object, TEST_OBJECT_NAME1); +
+ cos_copy_object_params_t *params = NULL; + params = cos_create_copy_object_params(p); + s = cos_copy_object(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object, NULL, params, &resp_headers); + if (cos_status_is_ok(s)) { + printf("put object copy succeeded\n"); + } else { + printf("put object copy failed\n"); + } +
+ // Destroy the memory pool + cos_pool_destroy(p); +} +
+void test_modify_storage_class() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t src_bucket; + cos_string_t src_object; + cos_string_t src_endpoint; + cos_table_t *resp_headers = NULL; +
+ cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + cos_str_set(&src_bucket, TEST_BUCKET_NAME); + cos_str_set(&src_endpoint, TEST_COS_ENDPOINT); + cos_str_set(&src_object, TEST_OBJECT_NAME1); +
+ // Set the x-cos-metadata-directive and x-cos-storage-class headers (replace with the storage class you want to switch to) + cos_table_t *headers = cos_table_make(p, 2); + apr_table_add(headers, "x-cos-metadata-directive", "Replaced"); + // Storage classes include INTELLIGENT_TIERING, MAZ_INTELLIGENT_TIERING, STANDARD_IA, ARCHIVE, DEEP_ARCHIVE + apr_table_add(headers, "x-cos-storage-class", "ARCHIVE"); +
+ cos_copy_object_params_t *params = NULL; + params = cos_create_copy_object_params(p); + s = cos_copy_object(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object, headers, params, + &resp_headers); + log_status(s); +
+ cos_pool_destroy(p); +} +
+void test_copy_mt() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t src_bucket; + cos_string_t src_object; + cos_string_t src_endpoint; +
+ cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); +
cos_str_set(&object, "test_copy.txt"); + cos_str_set(&src_bucket, "mybucket-1253685564"); + cos_str_set(&src_endpoint, "cn-south.myqcloud.com"); + cos_str_set(&src_object, "test.txt"); + + s = cos_upload_object_by_part_copy_mt(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object, 1024 * 1024, + 8, NULL); + log_status(s); + + cos_pool_destroy(p); +} + +void test_copy_with_part_copy() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t copy_source; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test_copy.txt"); + cos_str_set(©_source, "mybucket-1253685564.cn-south.myqcloud.com/test.txt"); + + s = cos_upload_object_by_part_copy(options, ©_source, &bucket, &object, 1024 * 1024); + log_status(s); + + cos_pool_destroy(p); +} + +void make_rand_string(cos_pool_t *p, int len, cos_string_t *data) { + char *str = NULL; + int i = 0; + str = (char *)cos_palloc(p, len + 1); + for (; i < len; i++) { + str[i] = 'a' + rand() % 32; + } + str[len] = '\0'; + cos_str_set(data, str); +} + +unsigned long get_file_size(const char *file_path) { + unsigned long filesize = -1; + struct stat statbuff; + + if (stat(file_path, &statbuff) < 0) { + return filesize; + } else { + filesize = statbuff.st_size; + } + + return filesize; +} + +void test_part_copy() { + cos_pool_t *p = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + int is_cname = 0; + cos_string_t upload_id; + cos_list_upload_part_params_t *list_upload_part_params = NULL; + cos_upload_part_copy_params_t *upload_part_copy_params1 = NULL; + cos_upload_part_copy_params_t *upload_part_copy_params2 = NULL; + cos_table_t *headers = NULL; + cos_table_t *query_params = NULL; + cos_table_t *resp_headers = NULL; + cos_table_t *list_part_resp_headers = NULL; + cos_list_t complete_part_list; + cos_list_part_content_t *part_content = NULL; + cos_complete_part_content_t *complete_content = NULL; + cos_table_t *complete_resp_headers = NULL; + cos_status_t *s = NULL; + int part1 = 1; + int part2 = 2; + char *local_filename = "test_upload_part_copy.file"; + char *download_filename = "test_upload_part_copy.file.download"; + char *source_object_name = "cos_test_upload_part_copy_source_object"; + char *dest_object_name = "cos_test_upload_part_copy_dest_object"; + FILE *fd = NULL; + cos_string_t download_file; + cos_string_t dest_bucket; + cos_string_t dest_object; + int64_t range_start1 = 0; + int64_t range_end1 = 6000000; + int64_t range_start2 = 6000001; + int64_t range_end2; + cos_string_t data; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + + // create multipart upload local file + make_rand_string(p, 10 * 1024 * 1024, &data); + fd = fopen(local_filename, "w"); + fwrite(data.data, sizeof(data.data[0]), data.len, fd); + fclose(fd); + + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, source_object_name); + cos_str_set(&file, local_filename); + s = cos_put_object_from_file(options, &bucket, &object, &file, NULL, &resp_headers); + log_status(s); + + // init mulitipart + cos_str_set(&object, dest_object_name); + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, NULL, &resp_headers); + log_status(s); + + // upload part copy 1 + 
upload_part_copy_params1 = cos_create_upload_part_copy_params(p); + cos_str_set(&upload_part_copy_params1->copy_source, + "bucket-appid.cn-south.myqcloud.com/cos_test_upload_part_copy_source_object"); + cos_str_set(&upload_part_copy_params1->dest_bucket, TEST_BUCKET_NAME); + cos_str_set(&upload_part_copy_params1->dest_object, dest_object_name); + cos_str_set(&upload_part_copy_params1->upload_id, upload_id.data); + upload_part_copy_params1->part_num = part1; + upload_part_copy_params1->range_start = range_start1; + upload_part_copy_params1->range_end = range_end1; + headers = cos_table_make(p, 0); + s = cos_upload_part_copy(options, upload_part_copy_params1, headers, &resp_headers); + log_status(s); + printf("last modified:%s, etag:%s\n", upload_part_copy_params1->rsp_content->last_modify.data, + upload_part_copy_params1->rsp_content->etag.data); + + // upload part copy 2 + resp_headers = NULL; + range_end2 = get_file_size(local_filename) - 1; + upload_part_copy_params2 = cos_create_upload_part_copy_params(p); + cos_str_set(&upload_part_copy_params2->copy_source, + "bucket-appid.cn-south.myqcloud.com/cos_test_upload_part_copy_source_object"); + cos_str_set(&upload_part_copy_params2->dest_bucket, TEST_BUCKET_NAME); + cos_str_set(&upload_part_copy_params2->dest_object, dest_object_name); + cos_str_set(&upload_part_copy_params2->upload_id, upload_id.data); + upload_part_copy_params2->part_num = part2; + upload_part_copy_params2->range_start = range_start2; + upload_part_copy_params2->range_end = range_end2; + headers = cos_table_make(p, 0); + s = cos_upload_part_copy(options, upload_part_copy_params2, headers, &resp_headers); + log_status(s); + printf("last modified:%s, etag:%s\n", upload_part_copy_params1->rsp_content->last_modify.data, + upload_part_copy_params1->rsp_content->etag.data); + + // list part + list_upload_part_params = cos_create_list_upload_part_params(p); + list_upload_part_params->max_ret = 10; + cos_list_init(&complete_part_list); + + cos_str_set(&dest_bucket, TEST_BUCKET_NAME); + cos_str_set(&dest_object, dest_object_name); + s = cos_list_upload_part(options, &dest_bucket, &dest_object, &upload_id, list_upload_part_params, + &list_part_resp_headers); + log_status(s); + cos_list_for_each_entry(cos_list_part_content_t, part_content, &list_upload_part_params->part_list, node) { + complete_content = cos_create_complete_part_content(p); + cos_str_set(&complete_content->part_number, part_content->part_number.data); + cos_str_set(&complete_content->etag, part_content->etag.data); + cos_list_add_tail(&complete_content->node, &complete_part_list); + } + + // complete multipart + headers = cos_table_make(p, 0); + s = cos_complete_multipart_upload(options, &dest_bucket, &dest_object, &upload_id, &complete_part_list, headers, + &complete_resp_headers); + log_status(s); + + // check upload copy part content equal to local file + headers = cos_table_make(p, 0); + cos_str_set(&download_file, download_filename); + s = cos_get_object_to_file(options, &dest_bucket, &dest_object, headers, query_params, &download_file, &resp_headers); + log_status(s); + printf("local file len = %" APR_INT64_T_FMT ", download file len = %" APR_INT64_T_FMT, get_file_size(local_filename), + get_file_size(download_filename)); + remove(download_filename); + remove(local_filename); + cos_pool_destroy(p); + + printf("test part copy ok\n"); +} + +void test_cors() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t 
*resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + cos_list_t rule_list; + cos_list_init(&rule_list); + cos_cors_rule_content_t *rule_content = NULL; + + rule_content = cos_create_cors_rule_content(p); + cos_str_set(&rule_content->id, "testrule1"); + cos_str_set(&rule_content->allowed_origin, "http://www.qq1.com"); + cos_str_set(&rule_content->allowed_method, "GET"); + cos_str_set(&rule_content->allowed_header, "*"); + cos_str_set(&rule_content->expose_header, "xxx"); + rule_content->max_age_seconds = 3600; + cos_list_add_tail(&rule_content->node, &rule_list); + + rule_content = cos_create_cors_rule_content(p); + cos_str_set(&rule_content->id, "testrule2"); + cos_str_set(&rule_content->allowed_origin, "http://www.qq2.com"); + cos_str_set(&rule_content->allowed_method, "GET"); + cos_str_set(&rule_content->allowed_header, "*"); + cos_str_set(&rule_content->expose_header, "yyy"); + rule_content->max_age_seconds = 7200; + cos_list_add_tail(&rule_content->node, &rule_list); + + rule_content = cos_create_cors_rule_content(p); + cos_str_set(&rule_content->id, "testrule3"); + cos_str_set(&rule_content->allowed_origin, "http://www.qq3.com"); + cos_str_set(&rule_content->allowed_method, "GET"); + cos_str_set(&rule_content->allowed_header, "*"); + cos_str_set(&rule_content->expose_header, "zzz"); + rule_content->max_age_seconds = 60; + cos_list_add_tail(&rule_content->node, &rule_list); + + // put cors + s = cos_put_bucket_cors(options, &bucket, &rule_list, &resp_headers); + log_status(s); + + // get cors + cos_list_t rule_list_ret; + cos_list_init(&rule_list_ret); + s = cos_get_bucket_cors(options, &bucket, &rule_list_ret, &resp_headers); + log_status(s); + cos_cors_rule_content_t *content = NULL; + cos_list_for_each_entry(cos_cors_rule_content_t, content, &rule_list_ret, node) { + printf( + "cors id:%s, allowed_origin:%s, allowed_method:%s, allowed_header:%s, expose_header:%s, max_age_seconds:%d\n", + content->id.data, content->allowed_origin.data, content->allowed_method.data, content->allowed_header.data, + content->expose_header.data, content->max_age_seconds); + } + + // delete cors + cos_delete_bucket_cors(options, &bucket, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_versioning() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + cos_versioning_content_t *versioning = NULL; + versioning = cos_create_versioning_content(p); + cos_str_set(&versioning->status, "Suspended"); + + // put bucket versioning + s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + + // get bucket versioning + cos_str_set(&versioning->status, ""); + s = cos_get_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + printf("bucket versioning status: %s\n", versioning->status.data); + + cos_pool_destroy(p); +} + +void test_replication() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_request_options_t *dst_options = NULL; + cos_string_t bucket; + cos_string_t dst_bucket; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); 
+ options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&dst_bucket, "replicationtest"); + + dst_options = cos_request_options_create(p); + init_test_request_options(dst_options, is_cname); + cos_str_set(&dst_options->config->endpoint, "cn-east.myqcloud.com"); + + // enable bucket versioning + cos_versioning_content_t *versioning = NULL; + versioning = cos_create_versioning_content(p); + cos_str_set(&versioning->status, "Enabled"); + s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + s = cos_put_bucket_versioning(dst_options, &dst_bucket, versioning, &resp_headers); + log_status(s); + + cos_replication_params_t *replication_param = NULL; + replication_param = cos_create_replication_params(p); + cos_str_set(&replication_param->role, "qcs::cam::uin/100000616666:uin/100000616666"); + + cos_replication_rule_content_t *rule = NULL; + rule = cos_create_replication_rule_content(p); + cos_str_set(&rule->id, "Rule_01"); + cos_str_set(&rule->status, "Enabled"); + cos_str_set(&rule->prefix, "test1"); + cos_str_set(&rule->dst_bucket, "qcs:id/0:cos:cn-east:appid/1253686666:replicationtest"); + cos_list_add_tail(&rule->node, &replication_param->rule_list); + + rule = cos_create_replication_rule_content(p); + cos_str_set(&rule->id, "Rule_02"); + cos_str_set(&rule->status, "Disabled"); + cos_str_set(&rule->prefix, "test2"); + cos_str_set(&rule->storage_class, "Standard_IA"); + cos_str_set(&rule->dst_bucket, "qcs:id/0:cos:cn-east:appid/1253686666:replicationtest"); + cos_list_add_tail(&rule->node, &replication_param->rule_list); + + rule = cos_create_replication_rule_content(p); + cos_str_set(&rule->id, "Rule_03"); + cos_str_set(&rule->status, "Enabled"); + cos_str_set(&rule->prefix, "test3"); + cos_str_set(&rule->storage_class, "Standard_IA"); + cos_str_set(&rule->dst_bucket, "qcs:id/0:cos:cn-east:appid/1253686666:replicationtest"); + cos_list_add_tail(&rule->node, &replication_param->rule_list); + + // put bucket replication + s = cos_put_bucket_replication(options, &bucket, replication_param, &resp_headers); + log_status(s); + + // get bucket replication + cos_replication_params_t *replication_param2 = NULL; + replication_param2 = cos_create_replication_params(p); + s = cos_get_bucket_replication(options, &bucket, replication_param2, &resp_headers); + log_status(s); + printf("ReplicationConfiguration role: %s\n", replication_param2->role.data); + cos_replication_rule_content_t *content = NULL; + cos_list_for_each_entry(cos_replication_rule_content_t, content, &replication_param2->rule_list, node) { + printf("ReplicationConfiguration rule, id:%s, status:%s, prefix:%s, dst_bucket:%s, storage_class:%s\n", + content->id.data, content->status.data, content->prefix.data, content->dst_bucket.data, + content->storage_class.data); + } + + // delete bucket replication + s = cos_delete_bucket_replication(options, &bucket, &resp_headers); + log_status(s); + + // disable bucket versioning + cos_str_set(&versioning->status, "Suspended"); + s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + s = cos_put_bucket_versioning(dst_options, &dst_bucket, versioning, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_presigned_url() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t presigned_url; + cos_table_t *params = NULL; + 
cos_table_t *headers = NULL; + int sign_host = 1; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + cos_gen_presigned_url(options, &bucket, &object, 300, HTTP_GET, &presigned_url); + printf("presigned_url: %s\n", presigned_url.data); + + // 添加您自己的params和headers + params = cos_table_make(options->pool, 0); + // cos_table_add(params, "param1", "value"); + headers = cos_table_make(options->pool, 0); + // cos_table_add(headers, "header1", "value"); + + // 强烈建议sign_host为1,这样强制把host头域加入签名列表,防止越权访问问题 + cos_gen_presigned_url_safe(options, &bucket, &object, 300, HTTP_GET, headers, params, sign_host, &presigned_url); + printf("presigned_url_safe: %s\n", presigned_url.data); + + cos_pool_destroy(p); +} + +void test_head_bucket() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + + cos_str_set(&bucket, TEST_BUCKET_NAME); + options->ctl = cos_http_controller_create(options->pool, 0); + + status = cos_head_bucket(options, &bucket, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +void test_check_bucket_exist() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + cos_bucket_exist_status_e bucket_exist; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // 检查桶是否存在 + status = cos_check_bucket_exist(options, &bucket, &bucket_exist, &resp_headers); + if (bucket_exist == COS_BUCKET_NON_EXIST) { + printf("bucket: %.*s non exist.\n", bucket.len, bucket.data); + } else if (bucket_exist == COS_BUCKET_EXIST) { + printf("bucket: %.*s exist.\n", bucket.len, bucket.data); + } else { + printf("bucket: %.*s unknown status.\n", bucket.len, bucket.data); + log_status(status); + } + + cos_pool_destroy(pool); +} + +void test_get_service() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_get_service_params_t *list_params = NULL; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + + //创建get service参数, 默认获取全部bucket + list_params = cos_create_get_service_params(options->pool); + //若将all_region设置为0,则只根据options->config->endpoint的区域进行查询 + // list_params->all_region = 0; + + status = cos_get_service(options, list_params, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + cos_get_service_content_t *content = NULL; + char *line = NULL; + cos_list_for_each_entry(cos_get_service_content_t, content, &list_params->bucket_list, node) { + line = apr_psprintf(options->pool, "%.*s\t%.*s\t%.*s\n", content->bucket_name.len, content->bucket_name.data, + 
content->location.len, content->location.data, content->creation_date.len, + content->creation_date.data); + printf("%s", line); + } + + cos_pool_destroy(pool); +} + +void test_website() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_website_params_t *website_params = NULL; + cos_website_params_t *website_result = NULL; + cos_website_rule_content_t *website_content = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //创建website参数 + website_params = cos_create_website_params(options->pool); + cos_str_set(&website_params->index, "index.html"); + cos_str_set(&website_params->redirect_protocol, "https"); + cos_str_set(&website_params->error_document, "Error.html"); + + website_content = cos_create_website_rule_content(options->pool); + cos_str_set(&website_content->condition_errcode, "404"); + cos_str_set(&website_content->redirect_protocol, "https"); + cos_str_set(&website_content->redirect_replace_key, "404.html"); + cos_list_add_tail(&website_content->node, &website_params->rule_list); + + website_content = cos_create_website_rule_content(options->pool); + cos_str_set(&website_content->condition_prefix, "docs/"); + cos_str_set(&website_content->redirect_protocol, "https"); + cos_str_set(&website_content->redirect_replace_key_prefix, "documents/"); + cos_list_add_tail(&website_content->node, &website_params->rule_list); + + website_content = cos_create_website_rule_content(options->pool); + cos_str_set(&website_content->condition_prefix, "img/"); + cos_str_set(&website_content->redirect_protocol, "https"); + cos_str_set(&website_content->redirect_replace_key, "demo.jpg"); + cos_list_add_tail(&website_content->node, &website_params->rule_list); + + status = cos_put_bucket_website(options, &bucket, website_params, &resp_headers); + log_status(status); + + website_result = cos_create_website_params(options->pool); + status = cos_get_bucket_website(options, &bucket, website_result, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + cos_website_rule_content_t *content = NULL; + char *line = NULL; + line = apr_psprintf(options->pool, "%.*s\n", website_result->index.len, website_result->index.data); + printf("index: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", website_result->redirect_protocol.len, + website_result->redirect_protocol.data); + printf("redirect protocol: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", website_result->error_document.len, website_result->error_document.data); + printf("error document: %s", line); + cos_list_for_each_entry(cos_website_rule_content_t, content, &website_result->rule_list, node) { + line = apr_psprintf(options->pool, "%.*s\t%.*s\t%.*s\t%.*s\t%.*s\n", content->condition_errcode.len, + content->condition_errcode.data, content->condition_prefix.len, content->condition_prefix.data, + content->redirect_protocol.len, content->redirect_protocol.data, + content->redirect_replace_key.len, content->redirect_replace_key.data, + content->redirect_replace_key_prefix.len, content->redirect_replace_key_prefix.data); + printf("%s", line); + } + + status = cos_delete_bucket_website(options, &bucket, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +void 
test_domain() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_domain_params_t *domain_params = NULL; + cos_domain_params_t *domain_result = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //创建domain参数 + domain_params = cos_create_domain_params(options->pool); + cos_str_set(&domain_params->status, "ENABLED"); + cos_str_set(&domain_params->name, "www.abc.com"); + cos_str_set(&domain_params->type, "REST"); + cos_str_set(&domain_params->forced_replacement, "CNAME"); + + status = cos_put_bucket_domain(options, &bucket, domain_params, &resp_headers); + log_status(status); + + domain_result = cos_create_domain_params(options->pool); + status = cos_get_bucket_domain(options, &bucket, domain_result, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + char *line = NULL; + line = apr_psprintf(options->pool, "%.*s\n", domain_result->status.len, domain_result->status.data); + printf("status: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", domain_result->name.len, domain_result->name.data); + printf("name: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", domain_result->type.len, domain_result->type.data); + printf("type: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", domain_result->forced_replacement.len, + domain_result->forced_replacement.data); + printf("forced_replacement: %s", line); + + cos_pool_destroy(pool); +} + +void test_logging() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_logging_params_t *params = NULL; + cos_logging_params_t *result = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //创建logging参数 + params = cos_create_logging_params(options->pool); + cos_str_set(¶ms->target_bucket, TEST_BUCKET_NAME); + cos_str_set(¶ms->target_prefix, "logging/"); + + status = cos_put_bucket_logging(options, &bucket, params, &resp_headers); + log_status(status); + + result = cos_create_logging_params(options->pool); + status = cos_get_bucket_logging(options, &bucket, result, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + char *line = NULL; + line = apr_psprintf(options->pool, "%.*s\n", result->target_bucket.len, result->target_bucket.data); + printf("target bucket: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", result->target_prefix.len, result->target_prefix.data); + printf("target prefix: %s", line); + + cos_pool_destroy(pool); +} + +void test_inventory() { + cos_pool_t *pool = NULL; + int is_cname = 0; + int inum = 3, i, len; + char buf[inum][32]; + char dest_bucket[128]; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_inventory_params_t *get_params = NULL; + cos_inventory_optional_t *optional = NULL; + 
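+  // The inventory destination below is addressed by a resource name of the form
+  // "qcs::cos:<region>::<bucket-name>"; the snprintf() call further down assembles it from
+  // TEST_REGION and TEST_BUCKET_NAME.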
cos_list_inventory_params_t *list_params = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // put bucket inventory + len = snprintf(dest_bucket, 128, "qcs::cos:%s::%s", TEST_REGION, TEST_BUCKET_NAME); + dest_bucket[len] = 0; + for (i = 0; i < inum; i++) { + cos_inventory_params_t *params = cos_create_inventory_params(pool); + cos_inventory_optional_t *optional; + len = snprintf(buf[i], 32, "id%d", i); + buf[i][len] = 0; + cos_str_set(¶ms->id, buf[i]); + cos_str_set(¶ms->is_enabled, "true"); + cos_str_set(¶ms->frequency, "Daily"); + cos_str_set(¶ms->filter_prefix, "myPrefix"); + cos_str_set(¶ms->included_object_versions, "All"); + cos_str_set(¶ms->destination.format, "CSV"); + cos_str_set(¶ms->destination.account_id, TEST_UIN); + cos_str_set(¶ms->destination.bucket, dest_bucket); + cos_str_set(¶ms->destination.prefix, "invent"); + params->destination.encryption = 1; + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "Size"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "LastModifiedDate"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "ETag"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "StorageClass"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "ReplicationStatus"); + cos_list_add_tail(&optional->node, ¶ms->fields); + + status = cos_put_bucket_inventory(options, &bucket, params, &resp_headers); + log_status(status); + } + + // get inventory + get_params = cos_create_inventory_params(pool); + cos_str_set(&get_params->id, buf[inum / 2]); + status = cos_get_bucket_inventory(options, &bucket, get_params, &resp_headers); + log_status(status); + + printf("id: %s\nis_enabled: %s\nfrequency: %s\nfilter_prefix: %s\nincluded_object_versions: %s\n", + get_params->id.data, get_params->is_enabled.data, get_params->frequency.data, get_params->filter_prefix.data, + get_params->included_object_versions.data); + printf("destination:\n"); + printf("\tencryption: %d\n", get_params->destination.encryption); + printf("\tformat: %s\n", get_params->destination.format.data); + printf("\taccount_id: %s\n", get_params->destination.account_id.data); + printf("\tbucket: %s\n", get_params->destination.bucket.data); + printf("\tprefix: %s\n", get_params->destination.prefix.data); + cos_list_for_each_entry(cos_inventory_optional_t, optional, &get_params->fields, node) { + printf("field: %s\n", optional->field.data); + } + + // list inventory + list_params = cos_create_list_inventory_params(pool); + status = cos_list_bucket_inventory(options, &bucket, list_params, &resp_headers); + log_status(status); + + get_params = NULL; + cos_list_for_each_entry(cos_inventory_params_t, get_params, &list_params->inventorys, node) { + printf("id: %s\nis_enabled: %s\nfrequency: %s\nfilter_prefix: %s\nincluded_object_versions: %s\n", + get_params->id.data, get_params->is_enabled.data, get_params->frequency.data, get_params->filter_prefix.data, + get_params->included_object_versions.data); + 
printf("destination:\n"); + printf("\tencryption: %d\n", get_params->destination.encryption); + printf("\tformat: %s\n", get_params->destination.format.data); + printf("\taccount_id: %s\n", get_params->destination.account_id.data); + printf("\tbucket: %s\n", get_params->destination.bucket.data); + printf("\tprefix: %s\n", get_params->destination.prefix.data); + cos_list_for_each_entry(cos_inventory_optional_t, optional, &get_params->fields, node) { + printf("field: %s\n", optional->field.data); + } + } + + // delete inventory + for (i = 0; i < inum; i++) { + cos_string_t id; + cos_str_set(&id, buf[i]); + status = cos_delete_bucket_inventory(options, &bucket, &id, &resp_headers); + log_status(status); + } + + cos_pool_destroy(pool); +} + +void test_bucket_tagging() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_tagging_params_t *params = NULL; + cos_tagging_params_t *result = NULL; + cos_tagging_tag_t *tag = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // put tagging + params = cos_create_tagging_params(pool); + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "age"); + cos_str_set(&tag->value, "18"); + cos_list_add_tail(&tag->node, ¶ms->node); + + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "name"); + cos_str_set(&tag->value, "xiaoming"); + cos_list_add_tail(&tag->node, ¶ms->node); + + status = cos_put_bucket_tagging(options, &bucket, params, &resp_headers); + log_status(status); + + // get tagging + result = cos_create_tagging_params(pool); + status = cos_get_bucket_tagging(options, &bucket, result, &resp_headers); + log_status(status); + + tag = NULL; + cos_list_for_each_entry(cos_tagging_tag_t, tag, &result->node, node) { + printf("taging key: %s\n", tag->key.data); + printf("taging value: %s\n", tag->value.data); + } + + // delete tagging + status = cos_delete_bucket_tagging(options, &bucket, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +void test_object_tagging() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t version_id = cos_string(""); + cos_tagging_params_t *params = NULL; + cos_tagging_params_t *result = NULL; + cos_tagging_tag_t *tag = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + // put object tagging + params = cos_create_tagging_params(pool); + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "age"); + cos_str_set(&tag->value, "18"); + cos_list_add_tail(&tag->node, ¶ms->node); + + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "name"); + cos_str_set(&tag->value, "xiaoming"); + cos_list_add_tail(&tag->node, ¶ms->node); + + status = cos_put_object_tagging(options, &bucket, &object, &version_id, NULL, params, &resp_headers); + log_status(status); + + // get object tagging + result = cos_create_tagging_params(pool); + status = 
cos_get_object_tagging(options, &bucket, &object, &version_id, NULL, result, &resp_headers); + log_status(status); + + tag = NULL; + cos_list_for_each_entry(cos_tagging_tag_t, tag, &result->node, node) { + printf("taging key: %s\n", tag->key.data); + printf("taging value: %s\n", tag->value.data); + } + + // delete tagging + status = cos_delete_object_tagging(options, &bucket, &object, &version_id, NULL, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +static void log_get_referer(cos_referer_params_t *result) { + int index = 0; + cos_referer_domain_t *domain; + + cos_warn_log("status: %s", result->status.data); + cos_warn_log("referer_type: %s", result->referer_type.data); + cos_warn_log("empty_refer_config: %s", result->empty_refer_config.data); + + cos_list_for_each_entry(cos_referer_domain_t, domain, &result->domain_list, node) { + cos_warn_log("domain index:%d", ++index); + cos_warn_log("domain: %s", domain->domain.data); + } +} + +void test_referer() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_referer_params_t *params = NULL; + cos_referer_domain_t *domain = NULL; + cos_referer_params_t *result = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/32492 + params = cos_create_referer_params(pool); + cos_str_set(¶ms->status, "Enabled"); + cos_str_set(¶ms->referer_type, "White-List"); + cos_str_set(¶ms->empty_refer_config, "Allow"); + domain = cos_create_referer_domain(pool); + cos_str_set(&domain->domain, "www.qq.com"); + cos_list_add_tail(&domain->node, ¶ms->domain_list); + domain = cos_create_referer_domain(pool); + cos_str_set(&domain->domain, "*.tencent.com"); + cos_list_add_tail(&domain->node, ¶ms->domain_list); + + // put referer + status = cos_put_bucket_referer(options, &bucket, params, &resp_headers); + log_status(status); + + // get referer + result = cos_create_referer_params(pool); + status = cos_get_bucket_referer(options, &bucket, result, &resp_headers); + log_status(status); + if (status->code == 200) { + log_get_referer(result); + } + + cos_pool_destroy(pool); +} + +void test_intelligenttiering() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_intelligenttiering_params_t *params = NULL; + cos_intelligenttiering_params_t *result = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // put intelligenttiering + params = cos_create_intelligenttiering_params(pool); + cos_str_set(¶ms->status, "Enabled"); + params->days = 30; + + status = cos_put_bucket_intelligenttiering(options, &bucket, params, &resp_headers); + log_status(status); + + // get intelligenttiering + result = cos_create_intelligenttiering_params(pool); + status = cos_get_bucket_intelligenttiering(options, &bucket, result, &resp_headers); + log_status(status); + + printf("status: %s\n", result->status.data); + printf("days: %d\n", 
result->days);
+  cos_pool_destroy(pool);
+}
+
+void test_delete_directory() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_table_t *resp_headers;
+  int is_truncated = 1;
+  cos_string_t marker;
+  cos_list_t deleted_object_list;
+  int is_quiet = COS_TRUE;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // list object (get bucket)
+  cos_list_object_params_t *list_params = NULL;
+  list_params = cos_create_list_object_params(p);
+  cos_str_set(&list_params->prefix, "folder/");
+  cos_str_set(&marker, "");
+  while (is_truncated) {
+    list_params->marker = marker;
+    s = cos_list_object(options, &bucket, list_params, &resp_headers);
+    if (!cos_status_is_ok(s)) {
+      printf("list object failed, req_id:%s\n", s->req_id);
+      break;
+    }
+
+    s = cos_delete_objects(options, &bucket, &list_params->object_list, is_quiet, &resp_headers, &deleted_object_list);
+    log_status(s);
+    if (!cos_status_is_ok(s)) {
+      printf("delete objects failed, req_id:%s\n", s->req_id);
+    }
+
+    is_truncated = list_params->truncated;
+    marker = list_params->next_marker;
+  }
+  cos_pool_destroy(p);
+}
+
+void test_list_directory() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_table_t *resp_headers;
+  int is_truncated = 1;
+  cos_string_t marker;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // list object (get bucket)
+  cos_list_object_params_t *list_params = NULL;
+  list_params = cos_create_list_object_params(p);
+  // prefix: only objects whose key starts with this prefix are listed
+  cos_str_set(&list_params->prefix, "folder/");
+  // delimiter: set to "/" to list only the current directory level; leave empty to list all objects
+  cos_str_set(&list_params->delimiter, "/");
+  // maximum number of objects returned per request; a single list object call supports at most 1000
+  list_params->max_ret = 1000;
+  cos_str_set(&marker, "");
+  while (is_truncated) {
+    list_params->marker = marker;
+    s = cos_list_object(options, &bucket, list_params, &resp_headers);
+    if (!cos_status_is_ok(s)) {
+      printf("list object failed, req_id:%s\n", s->req_id);
+      break;
+    }
+    // list_params->object_list holds the objects returned by this page
+    cos_list_object_content_t *content = NULL;
+    cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) {
+      printf("object: %s\n", content->key.data);
+    }
+    // list_params->common_prefix_list holds the paths truncated by the delimiter; with "/" these are the sub-directories
+    cos_list_object_common_prefix_t *common_prefix = NULL;
+    cos_list_for_each_entry(cos_list_object_common_prefix_t, common_prefix, &list_params->common_prefix_list, node) {
+      printf("common prefix: %s\n", common_prefix->prefix.data);
+    }
+
+    is_truncated = list_params->truncated;
+    marker = list_params->next_marker;
+  }
+  cos_pool_destroy(p);
+}
+
+void test_list_all_objects() {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_table_t *resp_headers;
+  int is_truncated = 1;
+  cos_string_t marker;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  init_test_request_options(options, is_cname);
+  cos_str_set(&bucket, TEST_BUCKET_NAME);
+
+  // list object (get bucket)
+  cos_list_object_params_t *list_params = NULL;
+  list_params = cos_create_list_object_params(p);
+  //
设置最大遍历出多少个对象, 一次listobject最大支持1000 + list_params->max_ret = 1000; + cos_str_set(&marker, ""); + while (is_truncated) { + list_params->marker = marker; + cos_list_init(&list_params->object_list); + s = cos_list_object(options, &bucket, list_params, &resp_headers); + if (!cos_status_is_ok(s)) { + printf("list object failed, req_id:%s\n", s->req_id); + break; + } + // list_params->object_list 返回列出的object对象。 + cos_list_object_content_t *content = NULL; + cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) { + printf("object: %s\n", content->key.data); + } + + is_truncated = list_params->truncated; + marker = list_params->next_marker; + } + cos_pool_destroy(p); +} + +void test_download_directory() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t file_name; + cos_string_t suffix = cos_string("/"); + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_table_t *params = NULL; + int is_truncated = 1; + cos_string_t marker; + apr_status_t status; + + //初始化请求选项 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // list object (get bucket) + cos_list_object_params_t *list_params = NULL; + list_params = cos_create_list_object_params(p); + cos_str_set(&list_params->prefix, "folder/"); //替换为您自己的目录名称 + cos_str_set(&marker, ""); + while (is_truncated) { + list_params->marker = marker; + s = cos_list_object(options, &bucket, list_params, &resp_headers); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("list object failed, req_id:%s\n", s->req_id); + break; + } + cos_list_object_content_t *content = NULL; + cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) { + cos_str_set(&file_name, content->key.data); + if (cos_ends_with(&content->key, &suffix)) { + //如果是目录需要先创建, 0x0755权限可以自己按需修改,参考apr_file_info.h中定义 + status = apr_dir_make(content->key.data, 0x0755, options->pool); + if (status != APR_SUCCESS && !APR_STATUS_IS_EEXIST(status)) { + printf("mkdir: %s failed, status: %d\n", content->key.data, status); + } + } else { + //下载对象到本地目录,这里默认下载在程序运行的当前目录 + s = cos_get_object_to_file(options, &bucket, &content->key, headers, params, &file_name, &resp_headers); + if (!cos_status_is_ok(s)) { + printf("get object[%s] failed, req_id:%s\n", content->key.data, s->req_id); + } + } + } + is_truncated = list_params->truncated; + marker = list_params->next_marker; + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_move() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t src_object; + cos_string_t src_endpoint; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //设置对象复制 + cos_str_set(&object, TEST_OBJECT_NAME1); + cos_str_set(&src_endpoint, TEST_COS_ENDPOINT); + cos_str_set(&src_object, TEST_OBJECT_NAME2); + + cos_copy_object_params_t *params = NULL; + params = cos_create_copy_object_params(p); + s = cos_copy_object(options, &bucket, &src_object, &src_endpoint, &bucket, &object, NULL, params, &resp_headers); + log_status(s); + if (cos_status_is_ok(s)) { + s = cos_delete_object(options, &bucket, &src_object, 
&resp_headers); + log_status(s); + printf("move object succeeded\n"); + } else { + printf("move object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +// 基础图片处理 +void test_ci_base_image_process() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers; + cos_table_t *params = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + params = cos_table_make(p, 1); + apr_table_addn(params, "imageMogr2/thumbnail/!50p", ""); + cos_str_set(&file, "test.jpg"); + cos_str_set(&object, "test.jpg"); + s = cos_get_object_to_file(options, &bucket, &object, NULL, params, &file, &resp_headers); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id); + } + cos_pool_destroy(p); +} + +// 持久化处理 +void test_ci_image_process() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + ci_operation_result_t *results = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.jpg"); + + // 云上数据处理 + headers = cos_table_make(p, 1); + apr_table_addn(headers, "pic-operations", + "{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"imageView2/format/png\"}]}"); + s = ci_image_process(options, &bucket, &object, headers, &resp_headers, &results); + log_status(s); + printf("origin key: %s\n", results->origin.key.data); + printf("process key: %s\n", results->object.key.data); + + // 上传时处理 + headers = cos_table_make(p, 1); + apr_table_addn(headers, "pic-operations", + "{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"imageView2/format/png\"}]}"); + cos_str_set(&file, "test.jpg"); + cos_str_set(&object, "test.jpg"); + s = ci_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers, &results); + log_status(s); + printf("origin key: %s\n", results->origin.key.data); + printf("process key: %s\n", results->object.key.data); + + cos_pool_destroy(p); +} + +// 二维码识别 +void test_ci_image_qrcode() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + ci_operation_result_t *results = NULL; + ci_qrcode_info_t *content = NULL; + ci_qrcode_result_t *result2 = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.jpg"); + + headers = cos_table_make(p, 1); + apr_table_addn(headers, "pic-operations", + 
"{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"QRcode/cover/1\"}]}"); + // 上传时识别 + cos_str_set(&file, "test.jpg"); + cos_str_set(&object, "test.jpg"); + s = ci_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers, &results); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("put object failed\n"); + } + printf("CodeStatus: %d\n", results->object.code_status); + cos_list_for_each_entry(ci_qrcode_info_t, content, &results->object.qrcode_info, node) { + printf("CodeUrl: %s\n", content->code_url.data); + printf("Point: %s\n", content->point[0].data); + printf("Point: %s\n", content->point[1].data); + printf("Point: %s\n", content->point[2].data); + printf("Point: %s\n", content->point[3].data); + } + + // 下载时识别 + s = ci_get_qrcode(options, &bucket, &object, 1, NULL, NULL, &resp_headers, &result2); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("get object failed\n"); + } + printf("CodeStatus: %d\n", result2->code_status); + cos_list_for_each_entry(ci_qrcode_info_t, content, &result2->qrcode_info, node) { + printf("CodeUrl: %s\n", content->code_url.data); + printf("Point: %s\n", content->point[0].data); + printf("Point: %s\n", content->point[1].data); + printf("Point: %s\n", content->point[2].data); + printf("Point: %s\n", content->point[3].data); + } + printf("ImageResult: %s\n", result2->result_image.data); + + //销毁内存池 + cos_pool_destroy(p); +} + +// 图片压缩 +void test_ci_image_compression() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers; + cos_table_t *params = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + params = cos_table_make(p, 1); + apr_table_addn(params, "imageMogr2/format/tpg", ""); + cos_str_set(&object, "test.jpg"); + cos_str_set(&file, "test.tpg"); + s = cos_get_object_to_file(options, &bucket, &object, NULL, params, &file, &resp_headers); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id); + } + + params = cos_table_make(p, 1); + apr_table_addn(params, "imageMogr2/format/heif", ""); + cos_str_set(&file, "test.heif"); + s = cos_get_object_to_file(options, &bucket, &object, NULL, params, &file, &resp_headers); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id); + } +} + +static void log_video_auditing_result(ci_video_auditing_job_result_t *result) { + cos_warn_log("jobid: %s", result->jobs_detail.job_id.data); + cos_warn_log("state: %s", result->jobs_detail.state.data); + cos_warn_log("creation_time: %s", result->jobs_detail.creation_time.data); +} + +static void log_get_auditing_result(ci_auditing_job_result_t *result) { + int index = 0; + ci_auditing_snapshot_result_t *snapshot_info; + ci_auditing_audio_section_result_t *audio_section_info; + + cos_warn_log("nonexist_job_ids: %s", result->nonexist_job_ids.data); + cos_warn_log("code: %s", 
result->jobs_detail.code.data); + cos_warn_log("message: %s", result->jobs_detail.message.data); + cos_warn_log("state: %s", result->jobs_detail.state.data); + cos_warn_log("creation_time: %s", result->jobs_detail.creation_time.data); + cos_warn_log("object: %s", result->jobs_detail.object.data); + cos_warn_log("snapshot_count: %s", result->jobs_detail.snapshot_count.data); + cos_warn_log("result: %d", result->jobs_detail.result); + + cos_warn_log("porn_info.hit_flag: %d", result->jobs_detail.porn_info.hit_flag); + cos_warn_log("porn_info.count: %d", result->jobs_detail.porn_info.count); + cos_warn_log("terrorism_info.hit_flag: %d", result->jobs_detail.terrorism_info.hit_flag); + cos_warn_log("terrorism_info.count: %d", result->jobs_detail.terrorism_info.count); + cos_warn_log("politics_info.hit_flag: %d", result->jobs_detail.politics_info.hit_flag); + cos_warn_log("politics_info.count: %d", result->jobs_detail.politics_info.count); + cos_warn_log("ads_info.hit_flag: %d", result->jobs_detail.ads_info.hit_flag); + cos_warn_log("ads_info.count: %d", result->jobs_detail.ads_info.count); + + cos_list_for_each_entry(ci_auditing_snapshot_result_t, snapshot_info, &result->jobs_detail.snapshot_info_list, node) { + cos_warn_log("snapshot index:%d", ++index); + cos_warn_log("snapshot_info->url: %s", snapshot_info->url.data); + cos_warn_log("snapshot_info->snapshot_time: %d", snapshot_info->snapshot_time); + cos_warn_log("snapshot_info->text: %s", snapshot_info->text.data); + + cos_warn_log("snapshot_info->porn_info.hit_flag: %d", snapshot_info->porn_info.hit_flag); + cos_warn_log("snapshot_info->porn_info.score: %d", snapshot_info->porn_info.score); + cos_warn_log("snapshot_info->porn_info.label: %s", snapshot_info->porn_info.label.data); + cos_warn_log("snapshot_info->porn_info.sub_lable: %s", snapshot_info->porn_info.sub_lable.data); + cos_warn_log("snapshot_info->terrorism_info.hit_flag: %d", snapshot_info->terrorism_info.hit_flag); + cos_warn_log("snapshot_info->terrorism_info.score: %d", snapshot_info->terrorism_info.score); + cos_warn_log("snapshot_info->terrorism_info.label: %s", snapshot_info->terrorism_info.label.data); + cos_warn_log("snapshot_info->terrorism_info.sub_lable: %s", snapshot_info->terrorism_info.sub_lable.data); + cos_warn_log("snapshot_info->politics_info.hit_flag: %d", snapshot_info->politics_info.hit_flag); + cos_warn_log("snapshot_info->politics_info.score: %d", snapshot_info->politics_info.score); + cos_warn_log("snapshot_info->politics_info.label: %s", snapshot_info->politics_info.label.data); + cos_warn_log("snapshot_info->politics_info.sub_lable: %s", snapshot_info->politics_info.sub_lable.data); + cos_warn_log("snapshot_info->ads_info.hit_flag: %d", snapshot_info->ads_info.hit_flag); + cos_warn_log("snapshot_info->ads_info.score: %d", snapshot_info->ads_info.score); + cos_warn_log("snapshot_info->ads_info.label: %s", snapshot_info->ads_info.label.data); + cos_warn_log("snapshot_info->ads_info.sub_lable: %s", snapshot_info->ads_info.sub_lable.data); + } + + index = 0; + cos_list_for_each_entry(ci_auditing_audio_section_result_t, audio_section_info, + &result->jobs_detail.audio_section_info_list, node) { + cos_warn_log("audio_section index:%d", ++index); + cos_warn_log("audio_section_info->url: %s", audio_section_info->url.data); + cos_warn_log("audio_section_info->text: %s", audio_section_info->text.data); + cos_warn_log("audio_section_info->offset_time: %d", audio_section_info->offset_time); + cos_warn_log("audio_section_info->duration: %d", 
audio_section_info->duration); + + cos_warn_log("audio_section_info->porn_info.hit_flag: %d", audio_section_info->porn_info.hit_flag); + cos_warn_log("audio_section_info->porn_info.score: %d", audio_section_info->porn_info.score); + cos_warn_log("audio_section_info->porn_info.key_words: %s", audio_section_info->porn_info.key_words.data); + cos_warn_log("audio_section_info->terrorism_info.hit_flag: %d", audio_section_info->terrorism_info.hit_flag); + cos_warn_log("audio_section_info->terrorism_info.score: %d", audio_section_info->terrorism_info.score); + cos_warn_log("audio_section_info->terrorism_info.key_words: %s", audio_section_info->terrorism_info.key_words.data); + cos_warn_log("audio_section_info->politics_info.hit_flag: %d", audio_section_info->politics_info.hit_flag); + cos_warn_log("audio_section_info->politics_info.score: %d", audio_section_info->politics_info.score); + cos_warn_log("audio_section_info->politics_info.key_words: %s", audio_section_info->politics_info.key_words.data); + cos_warn_log("audio_section_info->ads_info.hit_flag: %d", audio_section_info->ads_info.hit_flag); + cos_warn_log("audio_section_info->ads_info.score: %d", audio_section_info->ads_info.score); + cos_warn_log("audio_section_info->ads_info.key_words: %s", audio_section_info->ads_info.key_words.data); + } +} + +static void log_media_buckets_result(ci_media_buckets_result_t *result) { + int index = 0; + ci_media_bucket_list_t *media_bucket; + + cos_warn_log("total_count: %d", result->total_count); + cos_warn_log("page_number: %d", result->page_number); + cos_warn_log("page_size: %d", result->page_size); + + cos_list_for_each_entry(ci_media_bucket_list_t, media_bucket, &result->media_bucket_list, node) { + cos_warn_log("media_bucket index:%d", ++index); + cos_warn_log("media_bucket->bucket_id: %s", media_bucket->bucket_id.data); + cos_warn_log("media_bucket->name: %s", media_bucket->name.data); + cos_warn_log("media_bucket->region: %s", media_bucket->region.data); + cos_warn_log("media_bucket->create_time: %s", media_bucket->create_time.data); + } +} + +static void log_media_info_result(ci_media_info_result_t *result) { + // format + cos_warn_log("format.num_stream: %d", result->format.num_stream); + cos_warn_log("format.num_program: %d", result->format.num_program); + cos_warn_log("format.format_name: %s", result->format.format_name.data); + cos_warn_log("format.format_long_name: %s", result->format.format_long_name.data); + cos_warn_log("format.start_time: %f", result->format.start_time); + cos_warn_log("format.duration: %f", result->format.duration); + cos_warn_log("format.bit_rate: %d", result->format.bit_rate); + cos_warn_log("format.size: %d", result->format.size); + + // stream.video + cos_warn_log("stream.video.index: %d", result->stream.video.index); + cos_warn_log("stream.video.codec_name: %s", result->stream.video.codec_name.data); + cos_warn_log("stream.video.codec_long_name: %s", result->stream.video.codec_long_name.data); + cos_warn_log("stream.video.codec_time_base: %s", result->stream.video.codec_time_base.data); + cos_warn_log("stream.video.codec_tag_string: %s", result->stream.video.codec_tag_string.data); + cos_warn_log("stream.video.codec_tag: %s", result->stream.video.codec_tag.data); + cos_warn_log("stream.video.profile: %s", result->stream.video.profile.data); + cos_warn_log("stream.video.height: %d", result->stream.video.height); + cos_warn_log("stream.video.width: %d", result->stream.video.width); + cos_warn_log("stream.video.has_b_frame: %d", result->stream.video.has_b_frame); + 
cos_warn_log("stream.video.ref_frames: %d", result->stream.video.ref_frames); + cos_warn_log("stream.video.sar: %s", result->stream.video.sar.data); + cos_warn_log("stream.video.dar: %s", result->stream.video.dar.data); + cos_warn_log("stream.video.pix_format: %s", result->stream.video.pix_format.data); + cos_warn_log("stream.video.field_order: %s", result->stream.video.field_order.data); + cos_warn_log("stream.video.level: %d", result->stream.video.level); + cos_warn_log("stream.video.fps: %d", result->stream.video.fps); + cos_warn_log("stream.video.avg_fps: %s", result->stream.video.avg_fps.data); + cos_warn_log("stream.video.timebase: %s", result->stream.video.timebase.data); + cos_warn_log("stream.video.start_time: %f", result->stream.video.start_time); + cos_warn_log("stream.video.duration: %f", result->stream.video.duration); + cos_warn_log("stream.video.bit_rate: %f", result->stream.video.bit_rate); + cos_warn_log("stream.video.num_frames: %d", result->stream.video.num_frames); + cos_warn_log("stream.video.language: %s", result->stream.video.language.data); + + // stream.audio + cos_warn_log("stream.audio.index: %d", result->stream.audio.index); + cos_warn_log("stream.audio.codec_name: %s", result->stream.audio.codec_name.data); + cos_warn_log("stream.audio.codec_long_name: %s", result->stream.audio.codec_long_name.data); + cos_warn_log("stream.audio.codec_time_base: %s", result->stream.audio.codec_time_base.data); + cos_warn_log("stream.audio.codec_tag_string: %s", result->stream.audio.codec_tag_string.data); + cos_warn_log("stream.audio.codec_tag: %s", result->stream.audio.codec_tag.data); + cos_warn_log("stream.audio.sample_fmt: %s", result->stream.audio.sample_fmt.data); + cos_warn_log("stream.audio.sample_rate: %d", result->stream.audio.sample_rate); + cos_warn_log("stream.audio.channel: %d", result->stream.audio.channel); + cos_warn_log("stream.audio.channel_layout: %s", result->stream.audio.channel_layout.data); + cos_warn_log("stream.audio.timebase: %s", result->stream.audio.timebase.data); + cos_warn_log("stream.audio.start_time: %f", result->stream.audio.start_time); + cos_warn_log("stream.audio.duration: %f", result->stream.audio.duration); + cos_warn_log("stream.audio.bit_rate: %f", result->stream.audio.bit_rate); + cos_warn_log("stream.audio.language: %s", result->stream.audio.language.data); + + // stream.subtitle + cos_warn_log("stream.subtitle.index: %d", result->stream.subtitle.index); + cos_warn_log("stream.subtitle.language: %s", result->stream.subtitle.language.data); +} + +void test_ci_video_auditing() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers; + ci_video_auditing_job_options_t *job_options; + ci_video_auditing_job_result_t *job_result; + ci_auditing_job_result_t *auditing_result; + + // 基本配置 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_CI_ENDPOINT); // https://ci..myqcloud.com + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/47316 + job_options = 
ci_video_auditing_job_options_create(p); + cos_str_set(&job_options->input_object, "test.mp4"); + cos_str_set(&job_options->job_conf.detect_type, "Porn,Terrorism,Politics,Ads"); + cos_str_set(&job_options->job_conf.callback_version, "Detail"); + job_options->job_conf.detect_content = 1; + cos_str_set(&job_options->job_conf.snapshot.mode, "Interval"); + job_options->job_conf.snapshot.time_interval = 1.5; + job_options->job_conf.snapshot.count = 10; + + // 提交一个视频审核任务 + s = ci_create_video_auditing_job(options, &bucket, job_options, NULL, &resp_headers, &job_result); + log_status(s); + if (s->code == 200) { + log_video_auditing_result(job_result); + } + + // 等待视频审核任务完成,此处可修改您的等待时间 + sleep(300); + + // 获取审核任务结果 + s = ci_get_auditing_job(options, &bucket, &job_result->jobs_detail.job_id, NULL, &resp_headers, &auditing_result); + log_status(s); + if (s->code == 200) { + log_get_auditing_result(auditing_result); + } + + // 销毁内存池 + cos_pool_destroy(p); +} + +void test_ci_media_process_media_bucket() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers; + ci_media_buckets_request_t *media_buckets_request; + ci_media_buckets_result_t *media_buckets_result; + + // 基本配置 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_CI_ENDPOINT); // https://ci..myqcloud.com + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/48988 + media_buckets_request = ci_media_buckets_request_create(p); + cos_str_set(&media_buckets_request->regions, ""); + cos_str_set(&media_buckets_request->bucket_names, ""); + cos_str_set(&media_buckets_request->bucket_name, ""); + cos_str_set(&media_buckets_request->page_number, "1"); + cos_str_set(&media_buckets_request->page_size, "10"); + s = ci_describe_media_buckets(options, media_buckets_request, NULL, &resp_headers, &media_buckets_result); + log_status(s); + if (s->code == 200) { + log_media_buckets_result(media_buckets_result); + } + + // 销毁内存池 + cos_pool_destroy(p); +} + +void test_ci_media_process_snapshot() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers; + cos_list_t download_buffer; + cos_string_t object; + ci_get_snapshot_request_t *snapshot_request; + cos_buf_t *content = NULL; + cos_string_t pic_file = cos_string("snapshot.jpg"); + + // 基本配置 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.mp4"); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/55671 + snapshot_request = ci_snapshot_request_create(p); + 
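+  // A rough guide to the fields set below: time is the position (in seconds) of the frame to
+  // capture, width/height of 0 keep the source resolution, and mode "exactframe" requests the
+  // exact frame at that time instead of the nearest key frame; see the document linked above for
+  // the authoritative description of each parameter.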
snapshot_request->time = 7.5; + snapshot_request->width = 0; + snapshot_request->height = 0; + cos_str_set(&snapshot_request->format, "jpg"); + cos_str_set(&snapshot_request->rotate, "auto"); + cos_str_set(&snapshot_request->mode, "exactframe"); + cos_list_init(&download_buffer); + + s = ci_get_snapshot_to_buffer(options, &bucket, &object, snapshot_request, NULL, &download_buffer, &resp_headers); + log_status(s); + + int64_t len = 0; + int64_t size = 0; + int64_t pos = 0; + cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { len += cos_buf_size(content); } + char *buf = cos_pcalloc(p, (apr_size_t)(len + 1)); + buf[len] = '\0'; + cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { + size = cos_buf_size(content); + memcpy(buf + pos, content->pos, (size_t)size); + pos += size; + } + cos_warn_log("Download len:%ld data=%s", len, buf); + + s = ci_get_snapshot_to_file(options, &bucket, &object, snapshot_request, NULL, &pic_file, &resp_headers); + log_status(s); + + // 销毁内存池 + cos_pool_destroy(p); +} + +void test_ci_media_process_media_info() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers; + ci_media_info_result_t *media_info; + cos_string_t object; + + // 基本配置 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.mp4"); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/55672 + s = ci_get_media_info(options, &bucket, &object, NULL, &resp_headers, &media_info); + log_status(s); + if (s->code == 200) { + log_media_info_result(media_info); + } + + // 销毁内存池 + cos_pool_destroy(p); +} + +int main(int argc, char *argv[]) { + // 通过环境变量获取 SECRETID 和 SECRETKEY + // TEST_ACCESS_KEY_ID = getenv("COS_SECRETID"); + // TEST_ACCESS_KEY_SECRET = getenv("COS_SECRETKEY"); + + if (cos_http_io_initialize(NULL, 0) != COSE_OK) { + exit(1); + } + + // set log level, default COS_LOG_WARN + cos_log_set_level(COS_LOG_WARN); + + // set log output, default stderr + cos_log_set_output(NULL); + + // test_intelligenttiering(); + // test_bucket_tagging(); + // test_object_tagging(); + // test_referer(); + // test_logging(); + // test_inventory(); + // test_put_object_from_file_with_sse(); + // test_get_object_to_file_with_sse(); + // test_head_bucket(); + // test_check_bucket_exist(); + // test_get_service(); + // test_website(); + // test_domain(); + // test_delete_objects(); + // test_delete_objects_by_prefix(); + // test_bucket(); + // test_bucket_lifecycle(); + // test_object_restore(); + test_put_object_from_file(); + // test_sign(); + // test_object(); + // test_put_object_with_limit(); + // test_get_object_with_limit(); + // test_head_object(); + // test_gen_object_url(); + // test_list_objects(); + // test_list_directory(); + // test_list_all_objects(); + // test_create_dir(); + // test_append_object(); + // test_check_object_exist(); + // multipart_upload_file_from_file(); + // multipart_upload_file_from_buffer(); + // abort_multipart_upload(); + // 
list_multipart(); + + // pthread_t tid[20]; + // test_resumable_upload_with_multi_threads(); + // test_resumable(); + // test_bucket(); + // test_acl(); + // test_copy(); + // test_modify_storage_class(); + // test_cors(); + // test_versioning(); + // test_replication(); + // test_part_copy(); + // test_copy_with_part_copy(); + // test_move(); + // test_delete_directory(); + // test_download_directory(); + // test_presigned_url(); + // test_ci_base_image_process(); + // test_ci_image_process(); + // test_ci_image_qrcode(); + // test_ci_image_compression(); + // test_ci_video_auditing(); + // test_ci_media_process_media_bucket(); + // test_ci_media_process_snapshot(); + // test_ci_media_process_media_info(); + + // cos_http_io_deinitialize last + cos_http_io_deinitialize(); + + return 0; +} diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 0546ed7f47..1f6d0800a5 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -14,8 +14,8 @@ */ #define _DEFAULT_SOURCE -#include "os.h" #include "tglobal.h" +#include "os.h" #include "tconfig.h" #include "tgrant.h" #include "tlog.h" @@ -63,7 +63,7 @@ int32_t tsNumOfQnodeFetchThreads = 1; int32_t tsNumOfSnodeStreamThreads = 4; int32_t tsNumOfSnodeWriteThreads = 1; int32_t tsMaxStreamBackendCache = 128; // M -int32_t tsPQSortMemThreshold = 16; // M +int32_t tsPQSortMemThreshold = 16; // M // sync raft int32_t tsElectInterval = 25 * 1000; @@ -121,8 +121,8 @@ int32_t tsQueryPolicy = 1; int32_t tsQueryRspPolicy = 0; int64_t tsQueryMaxConcurrentTables = 200; // unit is TSDB_TABLE_NUM_UNIT bool tsEnableQueryHb = true; -bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true -bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true +bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true +bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true int32_t tsQuerySmaOptimize = 0; int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from which level to query rsma data. 
bool tsQueryPlannerTrace = false; @@ -235,6 +235,13 @@ int64_t tsCheckpointInterval = 3 * 60 * 60 * 1000; bool tsFilterScalarMode = false; int32_t tsKeepTimeOffset = 0; // latency of data migration +char tsS3Endpoint[TSDB_FQDN_LEN] = ""; +char tsS3AcessKeyId[TSDB_FQDN_LEN] = ""; +char tsS3AcessKeySecret[TSDB_FQDN_LEN] = ""; +char tsS3BucketName[TSDB_FQDN_LEN] = ""; +char tsS3AppId[TSDB_FQDN_LEN] = ""; +int8_t tsS3Enabled = false; + #ifndef _STORAGE int32_t taosSetTfsCfg(SConfig *pCfg) { SConfigItem *pItem = cfgGetItem(pCfg, "dataDir"); @@ -256,7 +263,9 @@ int32_t taosSetTfsCfg(SConfig *pCfg) { int32_t taosSetTfsCfg(SConfig *pCfg); #endif -struct SConfig *taosGetCfg() { return tsCfg; } +struct SConfig *taosGetCfg() { + return tsCfg; +} static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile, char *apolloUrl) { @@ -376,7 +385,9 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, CFG_SCOPE_CLIENT) != 0) return -1; + if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, + CFG_SCOPE_CLIENT) != 0) + return -1; if (cfgAddInt32(pCfg, "metaCacheMaxSize", tsMetaCacheMaxSize, -1, INT32_MAX, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddInt32(pCfg, "slowLogThreshold", tsSlowLogThreshold, 0, INT32_MAX, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddString(pCfg, "slowLogScope", "", CFG_SCOPE_CLIENT) != 0) return -1; @@ -389,7 +400,8 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "numOfRpcSessions", tsNumOfRpcSessions, 1, 100000, CFG_SCOPE_BOTH) != 0) return -1; tsTimeToGetAvailableConn = TRANGE(tsTimeToGetAvailableConn, 20, 10000000); - if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, CFG_SCOPE_BOTH) != 0) return -1; + if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, CFG_SCOPE_BOTH) != 0) + return -1; tsNumOfTaskQueueThreads = tsNumOfCores / 2; tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4); @@ -449,7 +461,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "statusInterval", tsStatusInterval, 1, 30, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 1, 1000000, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, CFG_SCOPE_CLIENT) != 0) return -1; - if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, + CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddInt32(pCfg, "countAlwaysReturnValue", tsCountAlwaysReturnValue, 0, 1, CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "queryBufferSize", tsQueryBufferSize, -1, 500000000000, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddBool(pCfg, "printAuth", tsPrintAuth, CFG_SCOPE_SERVER) != 0) return -1; @@ -477,7 +491,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4); if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", 
tsNumOfVnodeQueryThreads, 4, 1024, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 100, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 100, CFG_SCOPE_SERVER) != 0) + return -1; tsNumOfVnodeFetchThreads = tsNumOfCores / 4; tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4); @@ -497,7 +512,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfSnodeStreamThreads = tsNumOfCores / 4; tsNumOfSnodeStreamThreads = TRANGE(tsNumOfSnodeStreamThreads, 2, 4); - if (cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER) != 0) + return -1; tsNumOfSnodeWriteThreads = tsNumOfCores / 4; tsNumOfSnodeWriteThreads = TRANGE(tsNumOfSnodeWriteThreads, 2, 4); @@ -505,14 +521,18 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsRpcQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1; tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL); - if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_BOTH) != 0) + if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, + CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) + return -1; + if (cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) + return -1; - if (cfgAddInt64(pCfg, "vndCommitMaxInterval", tsVndCommitMaxIntervalMs, 1000, 1000 * 60 * 60, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt64(pCfg, "vndCommitMaxInterval", tsVndCommitMaxIntervalMs, 1000, 1000 * 60 * 60, CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddInt64(pCfg, "mndSdbWriteDelta", tsMndSdbWriteDelta, 20, 10000, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt64(pCfg, "mndLogRetention", tsMndLogRetention, 500, 10000, CFG_SCOPE_SERVER) != 0) return -1; @@ -542,7 +562,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "queryRsmaTolerance", tsQueryRsmaTolerance, 0, 900000, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt64(pCfg, "walFsyncDataSizeLimit", tsWalFsyncDataSizeLimit, 100 * 1024 * 1024, INT64_MAX, CFG_SCOPE_SERVER) != 0) + if (cfgAddInt64(pCfg, "walFsyncDataSizeLimit", tsWalFsyncDataSizeLimit, 100 * 1024 * 1024, INT64_MAX, + CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddBool(pCfg, "udf", tsStartUdfd, CFG_SCOPE_SERVER) != 0) return -1; @@ -553,13 +574,16 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt64(pCfg, "streamBufferSize", tsStreamBufferSize, 0, INT64_MAX, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt64(pCfg, "checkpointInterval", tsCheckpointInterval, 0, INT64_MAX, CFG_SCOPE_SERVER) != 0) return -1; - if 
(cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddBool(pCfg, "filterScalarMode", tsFilterScalarMode, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "keepTimeOffset", tsKeepTimeOffset, 0, 23, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER) != 0) return -1; + GRANT_CFG_ADD; return 0; } @@ -908,7 +932,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN); tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32; - tmqMaxTopicNum= cfgGetItem(pCfg, "tmqMaxTopicNum")->i32; + tmqMaxTopicNum = cfgGetItem(pCfg, "tmqMaxTopicNum")->i32; tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32; tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32; @@ -948,6 +972,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32; tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32; + tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); + GRANT_CFG_GET; return 0; } @@ -1020,7 +1046,7 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { taosSetCoreDump(enableCore); } else if (strcasecmp("enableQueryHb", name) == 0) { tsEnableQueryHb = cfgGetItem(pCfg, "enableQueryHb")->bval; - } else if (strcasecmp("ttlChangeOnWrite", name) == 0) { + } else if (strcasecmp("ttlChangeOnWrite", name) == 0) { tsTtlChangeOnWrite = cfgGetItem(pCfg, "ttlChangeOnWrite")->bval; } break; @@ -1249,9 +1275,9 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { // tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval; // } else if (strcasecmp("smlBatchSize", name) == 0) { // tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32; - } else if(strcasecmp("smlTsDefaultName", name) == 0) { + } else if (strcasecmp("smlTsDefaultName", name) == 0) { tstrncpy(tsSmlTsDefaultName, cfgGetItem(pCfg, "smlTsDefaultName")->str, TSDB_COL_NAME_LEN); - } else if(strcasecmp("smlDot2Underline", name) == 0) { + } else if (strcasecmp("smlDot2Underline", name) == 0) { tsSmlDot2Underline = cfgGetItem(pCfg, "smlDot2Underline")->bval; } else if (strcasecmp("shellActivityTimer", name) == 0) { tsShellActivityTimer = cfgGetItem(pCfg, "shellActivityTimer")->i32; @@ -1272,6 +1298,8 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? 
defaultFirstEp : pFirstEpItem->str, &firstEp); snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port); cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype); + } else if (strcasecmp("s3BucketName", name) == 0) { + tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); } else if (strcasecmp("sDebugFlag", name) == 0) { sDebugFlag = cfgGetItem(pCfg, "sDebugFlag")->i32; } else if (strcasecmp("smaDebugFlag", name) == 0) { diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 194ffa16f6..0612f924f5 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -8,6 +8,7 @@ set( "src/vnd/vnodeCommit.c" "src/vnd/vnodeQuery.c" "src/vnd/vnodeModule.c" + "src/vnd/vnodeCos.c" "src/vnd/vnodeSvr.c" "src/vnd/vnodeSync.c" "src/vnd/vnodeSnapshot.c" @@ -134,6 +135,11 @@ else() endif() endif() +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) + target_link_libraries( vnode PUBLIC os @@ -153,6 +159,13 @@ target_link_libraries( PUBLIC transport PUBLIC stream PUBLIC index + + # s3 + cos_c_sdk + ${APR_UTIL_LIBRARY} + ${APR_LIBRARY} + ${MINIXML_LIBRARY} + ${CURL_LIBRARY} ) IF (TD_GRANT) @@ -169,7 +182,20 @@ if(${BUILD_WITH_ROCKSDB}) add_definitions(-DUSE_ROCKSDB) endif(${BUILD_WITH_ROCKSDB}) - +# s3 +FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) +IF (APR_CONFIG_BIN) + EXECUTE_PROCESS( + COMMAND ${APR_CONFIG_BIN} --includedir + OUTPUT_VARIABLE APR_INCLUDE_DIR + OUTPUT_STRIP_TRAILING_WHITESPACE + ) +ENDIF() +include_directories (${APR_INCLUDE_DIR}) +target_include_directories( + vnode + PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" + ) if(${BUILD_TEST}) add_subdirectory(test) diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h new file mode 100644 index 0000000000..b8510213d7 --- /dev/null +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _TD_VND_COS_H_ +#define _TD_VND_COS_H_ + +#include "vnd.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern int8_t tsS3Enabled; + +int32_t s3Init(); +void s3CleanUp(); +void s3PutObjectFromFile(const char *file, const char *object); +void s3DeleteObjects(const char *object_name[], int nobject); + +#ifdef __cplusplus +} +#endif + +#endif /*_TD_VND_COS_H_*/ diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c index a4d5715083..ebe20c0e85 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRetention.c +++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c @@ -15,6 +15,7 @@ #include "tsdb.h" #include "tsdbFS2.h" +#include "vndCos.h" typedef struct { STsdb *tsdb; @@ -41,6 +42,28 @@ static int32_t tsdbDoRemoveFileObject(SRTNer *rtner, const STFileObj *fobj) { return TARRAY2_APPEND(rtner->fopArr, op); } +static int32_t tsdbRemoveFileObjectS3(SRTNer *rtner, const STFileObj *fobj) { + int32_t code = 0, lino = 0; + + STFileOp op = { + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + const char *object_name = taosDirEntryBaseName((char *)fobj->fname); + s3DeleteObjects(&object_name, 1); + +_exit: + if (code) { + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + } + return code; +} + static int32_t tsdbDoCopyFile(SRTNer *rtner, const STFileObj *from, const STFile *to) { int32_t code = 0; int32_t lino = 0; @@ -76,6 +99,33 @@ _exit: return code; } +static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile *to) { + int32_t code = 0; + int32_t lino = 0; + + char fname[TSDB_FILENAME_LEN]; + TdFilePtr fdFrom = NULL; + TdFilePtr fdTo = NULL; + + tsdbTFileName(rtner->tsdb, to, fname); + + fdFrom = taosOpenFile(from->fname, TD_FILE_READ); + if (fdFrom == NULL) code = terrno; + TSDB_CHECK_CODE(code, lino, _exit); + + char *object_name = taosDirEntryBaseName(fname); + s3PutObjectFromFile(from->fname, object_name); + + taosCloseFile(&fdFrom); + +_exit: + if (code) { + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + taosCloseFile(&fdFrom); + } + return code; +} + static int32_t tsdbDoMigrateFileObj(SRTNer *rtner, const STFileObj *fobj, const SDiskID *did) { int32_t code = 0; int32_t lino = 0; @@ -123,6 +173,53 @@ _exit: return code; } +static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const SDiskID *did) { + int32_t code = 0; + int32_t lino = 0; + STFileOp op = {0}; + + // remove old + op = (STFileOp){ + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + // create new + op = (STFileOp){ + .optype = TSDB_FOP_CREATE, + .fid = fobj->f->fid, + .nf = + { + .type = fobj->f->type, + .did = did[0], + .fid = fobj->f->fid, + .cid = fobj->f->cid, + .size = fobj->f->size, + .stt[0] = + { + .level = fobj->f->stt[0].level, + }, + }, + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + // do copy the file + code = tsdbCopyFileS3(rtner, fobj, &op.nf); + TSDB_CHECK_CODE(code, lino, _exit); + +_exit: + if (code) { + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + } + return code; +} + typedef struct { STsdb *tsdb; int32_t sync; @@ -201,8 +298,14 @@ static int32_t tsdbDoRetention2(void *arg) { for (int32_t ftype = 0; (ftype < TSDB_FTYPE_MAX) && (fobj = rtner->ctx->fset->farr[ftype], 1); ++ftype) { if (fobj == NULL) continue; - code = 
tsdbDoRemoveFileObject(rtner, fobj);
-      TSDB_CHECK_CODE(code, lino, _exit);
+      int32_t nlevel = tfsGetLevel(rtner->tsdb->pVnode->pTfs);
+      if (tsS3Enabled && nlevel > 1 && TSDB_FTYPE_DATA == ftype && fobj->f->did.level == nlevel - 1) {
+        code = tsdbRemoveFileObjectS3(rtner, fobj);
+        TSDB_CHECK_CODE(code, lino, _exit);
+      } else {
+        code = tsdbDoRemoveFileObject(rtner, fobj);
+        TSDB_CHECK_CODE(code, lino, _exit);
+      }
   }

   SSttLvl *lvl;
@@ -228,8 +331,15 @@ static int32_t tsdbDoRetention2(void *arg) {
       if (fobj == NULL) continue;
       if (fobj->f->did.level == did.level) continue;
-      code = tsdbDoMigrateFileObj(rtner, fobj, &did);
-      TSDB_CHECK_CODE(code, lino, _exit);
+
+      int32_t nlevel = tfsGetLevel(rtner->tsdb->pVnode->pTfs);
+      if (tsS3Enabled && nlevel > 1 && TSDB_FTYPE_DATA == ftype && did.level == nlevel - 1) {
+        code = tsdbMigrateDataFileS3(rtner, fobj, &did);
+        TSDB_CHECK_CODE(code, lino, _exit);
+      } else {
+        code = tsdbDoMigrateFileObj(rtner, fobj, &did);
+        TSDB_CHECK_CODE(code, lino, _exit);
+      }
     }

     // stt
@@ -281,4 +391,4 @@ int32_t tsdbRetention(STsdb *tsdb, int64_t now, int32_t sync) {
     tsdbFreeRtnArg(arg);
   }
   return code;
-}
\ No newline at end of file
+}
diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c
new file mode 100644
index 0000000000..1507df7074
--- /dev/null
+++ b/source/dnode/vnode/src/vnd/vnodeCos.c
@@ -0,0 +1,114 @@
+#define ALLOW_FORBID_FUNC
+
+#include "vndCos.h"
+
+#include "cos_api.h"
+#include "cos_http_io.h"
+#include "cos_log.h"
+
+extern char tsS3Endpoint[];
+extern char tsS3AcessKeyId[];
+extern char tsS3AcessKeySecret[];
+extern char tsS3BucketName[];
+extern char tsS3AppId[];
+
+int32_t s3Init() {
+  if (cos_http_io_initialize(NULL, 0) != COSE_OK) {
+    return -1;
+  }
+
+  // set log level, default COS_LOG_WARN
+  cos_log_set_level(COS_LOG_WARN);
+
+  // set log output, default stderr
+  cos_log_set_output(NULL);
+
+  return 0;
+}
+
+void s3CleanUp() { cos_http_io_deinitialize(); }
+
+static void log_status(cos_status_t *s) {
+  cos_warn_log("status->code: %d", s->code);
+  if (s->error_code) cos_warn_log("status->error_code: %s", s->error_code);
+  if (s->error_msg) cos_warn_log("status->error_msg: %s", s->error_msg);
+  if (s->req_id) cos_warn_log("status->req_id: %s", s->req_id);
+}
+
+static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) {
+  options->config = cos_config_create(options->pool);
+
+  cos_config_t *config = options->config;
+
+  cos_str_set(&config->endpoint, tsS3Endpoint);
+  cos_str_set(&config->access_key_id, tsS3AcessKeyId);
+  cos_str_set(&config->access_key_secret, tsS3AcessKeySecret);
+  cos_str_set(&config->appid, tsS3AppId);
+
+  config->is_cname = is_cname;
+
+  options->ctl = cos_http_controller_create(options->pool, 0);
+}
+
+void s3PutObjectFromFile(const char *file_str, const char *object_str) {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket, object, file;
+  cos_table_t *resp_headers;
+  int traffic_limit = 0;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  s3InitRequestOptions(options, is_cname);
+  cos_table_t *headers = NULL;
+  if (traffic_limit) {
+    // the traffic limit ranges from 819200 to 838860800, i.e. 100KB/s to 100MB/s; values outside this range return a 400 error
+    headers = cos_table_make(p, 1);
+    cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
+  }
+  cos_str_set(&bucket, tsS3BucketName);
+  cos_str_set(&file, file_str);
+  cos_str_set(&object, object_str);
+  s = cos_put_object_from_file(options, &bucket, &object, &file,
headers, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void s3DeleteObjects(const char *object_name[], int nobject) { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_list_t object_list; + cos_list_t deleted_object_list; + int is_quiet = COS_TRUE; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_str_set(&bucket, tsS3BucketName); + + cos_list_init(&object_list); + cos_list_init(&deleted_object_list); + + for (int i = 0; i < nobject; ++i) { + cos_object_key_t *content = cos_create_cos_object_key(p); + cos_str_set(&content->key, object_name[i]); + cos_list_add_tail(&content->node, &object_list); + } + + cos_status_t *s = cos_delete_objects(options, &bucket, &object_list, is_quiet, &resp_headers, &deleted_object_list); + log_status(s); + + cos_pool_destroy(p); + + if (cos_status_is_ok(s)) { + cos_warn_log("delete objects succeeded\n"); + } else { + cos_warn_log("delete objects failed\n"); + } +} diff --git a/source/dnode/vnode/src/vnd/vnodeModule.c b/source/dnode/vnode/src/vnd/vnodeModule.c index 74a8d14a86..6ccce5c9d7 100644 --- a/source/dnode/vnode/src/vnd/vnodeModule.c +++ b/source/dnode/vnode/src/vnd/vnodeModule.c @@ -14,6 +14,7 @@ */ #include "vnd.h" +#include "vndCos.h" typedef struct SVnodeTask SVnodeTask; struct SVnodeTask { @@ -81,6 +82,9 @@ int vnodeInit(int nthreads) { if (tqInit() < 0) { return -1; } + if (s3Init() < 0) { + return -1; + } return 0; } @@ -112,6 +116,7 @@ void vnodeCleanup() { walCleanUp(); tqCleanUp(); smaCleanUp(); + s3CleanUp(); } int vnodeScheduleTaskEx(int tpid, int (*execute)(void*), void* arg) { From ebd09ca532aecdd448dfe6693740270d55150f44 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 4 Aug 2023 13:44:17 +0800 Subject: [PATCH 02/81] tsdb/write: use keep1 as minKey instead of keep2 --- source/dnode/vnode/src/tsdb/tsdbWrite.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbWrite.c b/source/dnode/vnode/src/tsdb/tsdbWrite.c index 2dbac956ed..6e89b47adc 100644 --- a/source/dnode/vnode/src/tsdb/tsdbWrite.c +++ b/source/dnode/vnode/src/tsdb/tsdbWrite.c @@ -76,7 +76,7 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq2 *pMsg) { int32_t code = 0; STsdbKeepCfg *pCfg = &pTsdb->keepCfg; TSKEY now = taosGetTimestamp(pCfg->precision); - TSKEY minKey = now - tsTickPerMin[pCfg->precision] * pCfg->keep2; + TSKEY minKey = now - tsTickPerMin[pCfg->precision] * pCfg->keep1; TSKEY maxKey = tsMaxKeyByPrecision[pCfg->precision]; int32_t size = taosArrayGetSize(pMsg->aSubmitTbData); @@ -107,4 +107,4 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq2 *pMsg) { _exit: return code; -} \ No newline at end of file +} From 87783a965082757fb358a5f4ceb8882f6303dfe6 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 4 Aug 2023 15:37:52 +0800 Subject: [PATCH 03/81] s3query/get: pull object to local --- source/dnode/vnode/src/inc/vndCos.h | 3 + .../dnode/vnode/src/tsdb/tsdbReaderWriter.c | 21 +++++- source/dnode/vnode/src/vnd/vnodeCos.c | 75 +++++++++++++++++++ 3 files changed, 96 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h index b8510213d7..d4e19e9031 100644 --- a/source/dnode/vnode/src/inc/vndCos.h +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -28,6 +28,9 @@ int32_t s3Init(); void s3CleanUp(); void s3PutObjectFromFile(const char *file, const char 
*object);
 void s3DeleteObjects(const char *object_name[], int nobject);
+bool s3Exists(const char *object_name);
+void s3Get(const char *object_name, const char *path);
+void s3EvictCache();

 #ifdef __cplusplus
 }
diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
index 89b7d019ae..96037ff6be 100644
--- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
+++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
@@ -14,6 +14,7 @@
  */
 #include "tsdb.h"
+#include "vndCos.h"

 // =============== PAGE-WISE FILE ===============
 int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **ppFD) {
@@ -34,9 +35,23 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p
   pFD->flag = flag;
   pFD->pFD = taosOpenFile(path, flag);
   if (pFD->pFD == NULL) {
-    code = TAOS_SYSTEM_ERROR(errno);
-    taosMemoryFree(pFD);
-    goto _exit;
+    const char *object_name = taosDirEntryBaseName((char *)path);
+    if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3Exists(object_name)) {
+      s3EvictCache();
+      s3Get(object_name, path);
+
+      pFD->pFD = taosOpenFile(path, flag);
+
+      if (pFD->pFD == NULL) {
+        code = TAOS_SYSTEM_ERROR(errno);
+        taosMemoryFree(pFD);
+        goto _exit;
+      }
+    } else {
+      code = TAOS_SYSTEM_ERROR(errno);
+      taosMemoryFree(pFD);
+      goto _exit;
+    }
   }
   pFD->szPage = szPage;
   pFD->pgno = 0;
diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c
index 1507df7074..696632fc6f 100644
--- a/source/dnode/vnode/src/vnd/vnodeCos.c
+++ b/source/dnode/vnode/src/vnd/vnodeCos.c
@@ -112,3 +112,78 @@ void s3DeleteObjects(const char *object_name[], int nobject) {
     cos_warn_log("delete objects failed\n");
   }
 }
+
+bool s3Exists(const char *object_name) {
+  bool ret = false;
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers;
+  cos_table_t *headers = NULL;
+  cos_object_exist_status_e object_exist;
+
+  cos_pool_create(&p, NULL);
+  options = cos_request_options_create(p);
+  s3InitRequestOptions(options, is_cname);
+  cos_str_set(&bucket, tsS3BucketName);
+  cos_str_set(&object, object_name);
+
+  s = cos_check_object_exist(options, &bucket, &object, headers, &object_exist, &resp_headers);
+  if (object_exist == COS_OBJECT_NON_EXIST) {
+    cos_warn_log("object: %.*s non exist.\n", object.len, object.data);
+  } else if (object_exist == COS_OBJECT_EXIST) {
+    ret = true;
+    cos_warn_log("object: %.*s exist.\n", object.len, object.data);
+  } else {
+    cos_warn_log("object: %.*s unknown status.\n", object.len, object.data);
+    log_status(s);
+  }
+
+  cos_pool_destroy(p);
+
+  return ret;
+}
+
+void s3Get(const char *object_name, const char *path) {
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_string_t file;
+  cos_table_t *resp_headers = NULL;
+  cos_table_t *headers = NULL;
+  int traffic_limit = 0;
+
+  // create a memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  s3InitRequestOptions(options, is_cname);
+  cos_str_set(&bucket, tsS3BucketName);
+  if (traffic_limit) {
+    // the traffic limit ranges from 819200 to 838860800, i.e. 100KB/s to 100MB/s; values outside this range return a 400 error
+    headers = cos_table_make(p, 1);
+    cos_table_add_int(headers, "x-cos-traffic-limit", 819200);
+  }
+
+  // download the object
+  cos_str_set(&file, path);
+  cos_str_set(&object, object_name);
+  s = cos_get_object_to_file(options, &bucket,
&object, headers, NULL, &file, &resp_headers);
+  if (cos_status_is_ok(s)) {
+    cos_warn_log("get object succeeded\n");
+  } else {
+    cos_warn_log("get object failed\n");
+  }
+
+  // destroy the memory pool
+  cos_pool_destroy(p);
+}
+
+void s3EvictCache() {}

From 65d8af19ed74f697ac0fef7fb007b6cf67a23f9e Mon Sep 17 00:00:00 2001
From: Minglei Jin
Date: Fri, 4 Aug 2023 16:35:35 +0800
Subject: [PATCH 04/81] s3: new api s3Size

---
 source/dnode/vnode/src/inc/vndCos.h         |  4 +-
 .../dnode/vnode/src/tsdb/tsdbReaderWriter.c |  3 +-
 source/dnode/vnode/src/vnd/vnodeCos.c       | 45 ++++++++++++++++++++++++-
 3 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h
index d4e19e9031..6e0984c400 100644
--- a/source/dnode/vnode/src/inc/vndCos.h
+++ b/source/dnode/vnode/src/inc/vndCos.h
@@ -29,9 +29,9 @@ void s3CleanUp();
 void s3PutObjectFromFile(const char *file, const char *object);
 void s3DeleteObjects(const char *object_name[], int nobject);
 bool s3Exists(const char *object_name);
-void s3Get(const char *object_name, const char *path);
+bool s3Get(const char *object_name, const char *path);
 void s3EvictCache();
-
+long s3Size(const char *object_name);
 #ifdef __cplusplus
 }
 #endif
diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
index 96037ff6be..872042d9d5 100644
--- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
+++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
@@ -36,7 +36,8 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p
   pFD->pFD = taosOpenFile(path, flag);
   if (pFD->pFD == NULL) {
     const char *object_name = taosDirEntryBaseName((char *)path);
-    if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3Exists(object_name)) {
+    long s3_size = s3Size(object_name);
+    if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3_size > 0) {
       s3EvictCache();
       s3Get(object_name, path);
diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c
index 696632fc6f..a7b166b6c7 100644
--- a/source/dnode/vnode/src/vnd/vnodeCos.c
+++ b/source/dnode/vnode/src/vnd/vnodeCos.c
@@ -147,7 +147,8 @@ bool s3Exists(const char *object_name) {
   return ret;
 }

-void s3Get(const char *object_name, const char *path) {
+bool s3Get(const char *object_name, const char *path) {
+  bool ret = false;
   cos_pool_t *p = NULL;
   int is_cname = 0;
   cos_status_t *s = NULL;
   cos_request_options_t *options = NULL;
   cos_string_t bucket;
   cos_string_t object;
   cos_string_t file;
   cos_table_t *resp_headers = NULL;
   cos_table_t *headers = NULL;
   int traffic_limit = 0;
@@ -177,6 +178,7 @@ void s3Get(const char *object_name, const char *path) {
   cos_str_set(&object, object_name);
   s = cos_get_object_to_file(options, &bucket, &object, headers, NULL, &file, &resp_headers);
   if (cos_status_is_ok(s)) {
+    ret = true;
     cos_warn_log("get object succeeded\n");
   } else {
     cos_warn_log("get object failed\n");
@@ -184,6 +186,47 @@ void s3Get(const char *object_name, const char *path) {
   // destroy the memory pool
   cos_pool_destroy(p);
+
+  return ret;
 }

-void s3EvictCache() {}
+void s3EvictCache() {}
+
+long s3Size(const char *object_name) {
+  long size = 0;
+
+  cos_pool_t *p = NULL;
+  int is_cname = 0;
+  cos_status_t *s = NULL;
+  cos_request_options_t *options = NULL;
+  cos_string_t bucket;
+  cos_string_t object;
+  cos_table_t *resp_headers = NULL;
+
+  // create a memory pool
+  cos_pool_create(&p, NULL);
+
+  // initialize request options
+  options = cos_request_options_create(p);
+  s3InitRequestOptions(options, is_cname);
+  cos_str_set(&bucket, tsS3BucketName);
+
+  // get the object metadata
+  cos_str_set(&object, object_name);
+  s = cos_head_object(options, &bucket, &object, NULL, &resp_headers);
+  // print_headers(resp_headers);
+  if (cos_status_is_ok(s)) {
+    char *content_length_str = (char
*)apr_table_get(resp_headers, COS_CONTENT_LENGTH); + if (content_length_str != NULL) { + size = atol(content_length_str); + } + cos_warn_log("head object succeeded: %ld\n", size); + } else { + cos_warn_log("head object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); + + return size; +} From fac7e521e957ae1aaa279af48c0acd04c646817b Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 7 Aug 2023 15:59:37 +0800 Subject: [PATCH 05/81] s3/evict: fetch atime from stat file --- include/os/osFile.h | 2 +- source/dnode/mgmt/mgmt_mnode/src/mmFile.c | 4 +- source/dnode/mgmt/mgmt_vnode/src/vmFile.c | 2 +- source/dnode/mgmt/node_util/src/dmEps.c | 6 +- source/dnode/mgmt/node_util/src/dmFile.c | 2 +- source/dnode/vnode/src/inc/vndCos.h | 3 +- source/dnode/vnode/src/tq/tqOffsetSnapshot.c | 2 +- source/dnode/vnode/src/tsdb/tsdbFS.c | 10 +- .../dnode/vnode/src/tsdb/tsdbReaderWriter.c | 4 +- source/dnode/vnode/src/vnd/vnodeCos.c | 66 +++++- source/libs/function/src/udfd.c | 8 +- source/libs/index/src/indexFstFile.c | 4 +- source/libs/parser/src/parTranslater.c | 207 +++++++++--------- source/libs/sync/src/syncRaftStore.c | 2 +- source/libs/wal/src/walMeta.c | 12 +- source/os/src/osFile.c | 28 ++- source/util/src/tlog.c | 60 ++--- tools/shell/src/shellEngine.c | 100 ++++----- utils/test/c/tmqDemo.c | 3 +- 19 files changed, 300 insertions(+), 225 deletions(-) diff --git a/include/os/osFile.h b/include/os/osFile.h index 0e93002706..da1f8f8b57 100644 --- a/include/os/osFile.h +++ b/include/os/osFile.h @@ -76,7 +76,7 @@ int32_t taosUnLockFile(TdFilePtr pFile); int32_t taosUmaskFile(int32_t maskVal); -int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime); +int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime); int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno); int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime); bool taosCheckExistFile(const char *pathname); diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c index cb0849f4b9..64e18ef06d 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c @@ -46,7 +46,7 @@ static int32_t mmDecodeOption(SJson *pJson, SMnodeOpt *pOption) { if (code < 0) return -1; tjsonGetInt32ValueFromDouble(replica, "role", pOption->nodeRoles[i], code); if (code < 0) return -1; - if(pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER){ + if (pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER) { pOption->numOfReplicas++; } } @@ -65,7 +65,7 @@ int32_t mmReadFile(const char *path, SMnodeOpt *pOption) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%smnode.json", path, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("mnode file:%s not exist", file); return 0; } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index da7f4d4a56..ed32e75d18 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -97,7 +97,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t SWrapperCfg *pCfgs = NULL; snprintf(file, sizeof(file), "%s%svnodes.json", pMgmt->path, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("vnode file:%s not exist", file); return 0; } diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c index 1564a09035..88f6b5da40 
100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -100,7 +100,7 @@ int32_t dmReadEps(SDnodeData *pData) { goto _OVER; } - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("dnode file:%s not exist", file); code = 0; goto _OVER; @@ -350,7 +350,7 @@ void dmRotateMnodeEpSet(SDnodeData *pData) { } void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet) { - if(!pData->validMnodeEps) return; + if (!pData->validMnodeEps) return; dmGetMnodeEpSet(pData, pEpSet); dTrace("msg is redirected, handle:%p num:%d use:%d", pMsg->info.handle, pEpSet->numOfEps, pEpSet->inUse); for (int32_t i = 0; i < pEpSet->numOfEps; ++i) { @@ -469,7 +469,7 @@ static int32_t dmReadDnodePairs(SDnodeData *pData) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%sdnode%sep.json", tsDataDir, TD_DIRSEP, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dDebug("dnode file:%s not exist", file); code = 0; goto _OVER; diff --git a/source/dnode/mgmt/node_util/src/dmFile.c b/source/dnode/mgmt/node_util/src/dmFile.c index fb05f08c0c..c81efddcc1 100644 --- a/source/dnode/mgmt/node_util/src/dmFile.c +++ b/source/dnode/mgmt/node_util/src/dmFile.c @@ -38,7 +38,7 @@ int32_t dmReadFile(const char *path, const char *name, bool *pDeployed) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%s%s.json", path, TD_DIRSEP, name); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("file:%s not exist", file); code = 0; goto _OVER; diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h index 6e0984c400..f6db7f096e 100644 --- a/source/dnode/vnode/src/inc/vndCos.h +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -30,8 +30,9 @@ void s3PutObjectFromFile(const char *file, const char *object); void s3DeleteObjects(const char *object_name[], int nobject); bool s3Exists(const char *object_name); bool s3Get(const char *object_name, const char *path); -void s3EvictCache(); +void s3EvictCache(const char *path, long object_size); long s3Size(const char *object_name); + #ifdef __cplusplus } #endif diff --git a/source/dnode/vnode/src/tq/tqOffsetSnapshot.c b/source/dnode/vnode/src/tq/tqOffsetSnapshot.c index a4428aed43..6a66da30c6 100644 --- a/source/dnode/vnode/src/tq/tqOffsetSnapshot.c +++ b/source/dnode/vnode/src/tq/tqOffsetSnapshot.c @@ -60,7 +60,7 @@ int32_t tqOffsetSnapRead(STqOffsetReader* pReader, uint8_t** ppData) { } int64_t sz = 0; - if (taosStatFile(fname, &sz, NULL) < 0) { + if (taosStatFile(fname, &sz, NULL, NULL) < 0) { taosCloseFile(&pFile); taosMemoryFree(fname); return -1; diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c index ec116c717e..c0c74d6b87 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS.c @@ -176,7 +176,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // SDelFile if (pTsdb->fs.pDelFile) { tsdbDelFileName(pTsdb, pTsdb->fs.pDelFile, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -195,7 +195,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // head ========= tsdbHeadFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pHeadF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); 
TSDB_CHECK_CODE(code, lino, _exit); } @@ -206,7 +206,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // data ========= tsdbDataFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pDataF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -221,7 +221,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // sma ============= tsdbSmaFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pSmaF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -237,7 +237,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // stt =========== for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) { tsdbSttFileName(pTsdb, pSet->diskId, pSet->fid, pSet->aSttF[iStt], fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 872042d9d5..4d3b53bc5a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -38,7 +38,7 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p const char *object_name = taosDirEntryBaseName((char *)path); long s3_size = s3Size(object_name); if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3_size > 0) { - s3EvictCache(); + s3EvictCache(path, s3_size); s3Get(object_name, path); pFD->pFD = taosOpenFile(path, flag); @@ -66,7 +66,7 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p // not check file size when reading data files. if (flag != TD_FILE_READ) { - if (taosStatFile(path, &pFD->szFile, NULL) < 0) { + if (taosStatFile(path, &pFD->szFile, NULL, NULL) < 0) { code = TAOS_SYSTEM_ERROR(errno); taosMemoryFree(pFD->pBuf); taosCloseFile(&pFD->pFD); diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index a7b166b6c7..bac38f7c35 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -190,7 +190,71 @@ bool s3Get(const char *object_name, const char *path) { return ret; } -void s3EvictCache() {} +typedef struct { + int64_t size; + int32_t atime; + char name[TSDB_FILENAME_LEN]; +} SEvictFile; + +static int32_t evictFileCompareAsce(const void *pLeft, const void *pRight) { + SEvictFile *lhs = (SEvictFile *)pLeft; + SEvictFile *rhs = (SEvictFile *)pRight; + return lhs->atime < rhs->atime ? 
-1 : 1; +} + +void s3EvictCache(const char *path, long object_size) { + SDiskSize disk_size = {0}; + if (taosGetDiskSize((char *)path, &disk_size) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + vError("failed to get disk:%s size since %s", path, terrstr()); + return; + } + + if (object_size >= disk_size.avail + 1 << 30) { + // evict too old files + // 1, list data files' atime under dir(path) + char dir_name[TSDB_FILENAME_LEN] = "\0"; + tstrncpy(dir_name, path, TSDB_FILENAME_LEN); + taosDirName(dir_name); + + tdbDirPtr pDir = taosOpenDir(dir_name); + if (pDir == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + vError("failed to open %s since %s", dir_name, terrstr()); + } + SArray *evict_files = taosArrayInit(16, sizeof(SEvictFile)); + tdbDirEntryPtr pDirEntry; + while ((pDirEntry = taosReadDir(pDir)) != NULL) { + char *name = taosGetDirEntryName(pDirEntry); + if (!strncmp(name + strlen(name) - 5, ".data", 5)) { + SEvictFile e_file = {0}; + + tstrncpy(e_file.name, name, TSDB_FILENAME_LEN); + taosStatFile(name, &e_file.size, NULL, &e_file.atime); + + taosArrayPush(evict_files, &e_file); + } + } + taosCloseDir(&pDir); + + // 2, sort by atime + taosArraySort(evict_files, evictFileCompareAsce); + + // 3, remove files ascendingly until we get enough object_size space + long evict_size = 0; + size_t ef_size = TARRAY_SIZE(evict_files); + for (size_t i = 0; i < ef_size; ++i) { + SEvictFile *evict_file = taosArrayGet(evict_files, i); + taosRemoveFile(evict_file->name); + evict_size += evict_file->size; + if (evict_size >= object_size) { + break; + } + } + + taosArrayDestroy(evict_files); + } +} long s3Size(const char *object_name) { long size = 0; diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index 7371017111..575bce09bb 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -378,9 +378,9 @@ int32_t udfdInitializePythonPlugin(SUdfScriptPlugin *plugin) { "pyUdfDestroy", "pyUdfScalarProc", "pyUdfAggStart", "pyUdfAggFinish", "pyUdfAggProc", "pyUdfAggMerge"}; void **funcs[UDFD_MAX_PLUGIN_FUNCS] = { - (void **)&plugin->openFunc, (void **)&plugin->closeFunc, (void **)&plugin->udfInitFunc, - (void **)&plugin->udfDestroyFunc, (void **)&plugin->udfScalarProcFunc, (void **)&plugin->udfAggStartFunc, - (void **)&plugin->udfAggFinishFunc, (void **)&plugin->udfAggProcFunc, (void **)&plugin->udfAggMergeFunc}; + (void **)&plugin->openFunc, (void **)&plugin->closeFunc, (void **)&plugin->udfInitFunc, + (void **)&plugin->udfDestroyFunc, (void **)&plugin->udfScalarProcFunc, (void **)&plugin->udfAggStartFunc, + (void **)&plugin->udfAggFinishFunc, (void **)&plugin->udfAggProcFunc, (void **)&plugin->udfAggMergeFunc}; int32_t err = udfdLoadSharedLib(plugin->libPath, &plugin->lib, funcName, funcs, UDFD_MAX_PLUGIN_FUNCS); if (err != 0) { fnError("can not load python plugin. lib path %s", plugin->libPath); @@ -848,7 +848,7 @@ int32_t udfdSaveFuncBodyToFile(SFuncInfo *pFuncInfo, SUdf *udf) { char path[PATH_MAX] = {0}; udfdGetFuncBodyPath(udf, path); - bool fileExist = !(taosStatFile(path, NULL, NULL) < 0); + bool fileExist = !(taosStatFile(path, NULL, NULL, NULL) < 0); if (fileExist) { strncpy(udf->path, path, PATH_MAX); fnInfo("udfd func body file. 
reuse existing file %s", path); diff --git a/source/libs/index/src/indexFstFile.c b/source/libs/index/src/indexFstFile.c index e18d0bbad3..43f15f5196 100644 --- a/source/libs/index/src/indexFstFile.c +++ b/source/libs/index/src/indexFstFile.c @@ -162,7 +162,7 @@ static FORCE_INLINE int idxFileCtxGetSize(IFileCtx* ctx) { return ctx->offset; } else { int64_t file_size = 0; - taosStatFile(ctx->file.buf, &file_size, NULL); + taosStatFile(ctx->file.buf, &file_size, NULL, NULL); return (int)file_size; } } @@ -199,7 +199,7 @@ IFileCtx* idxFileCtxCreate(WriterType type, const char* path, bool readOnly, int code = taosFtruncateFile(ctx->file.pFile, 0); UNUSED(code); - code = taosStatFile(path, &ctx->file.size, NULL); + code = taosStatFile(path, &ctx->file.size, NULL, NULL); UNUSED(code); ctx->file.wBufOffset = 0; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 8ce68a5c8c..6845496a03 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -31,8 +31,8 @@ #define SYSTABLE_SHOW_TYPE_OFFSET QUERY_NODE_SHOW_DNODES_STMT typedef struct SRewriteTbNameContext { - int32_t errCode; - char* pTbName; + int32_t errCode; + char* pTbName; } SRewriteTbNameContext; typedef struct STranslateContext { @@ -54,7 +54,7 @@ typedef struct STranslateContext { bool stableQuery; bool showRewrite; SNode* pPrevRoot; - SNode* pPostRoot; + SNode* pPostRoot; } STranslateContext; typedef struct SBuildTopicContext { @@ -278,10 +278,11 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode); static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode); static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal); -static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt); -static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery); -static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery); -static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery); +static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt); +static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery); +static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery); +static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery); static bool afterGroupBy(ESqlClause clause) { return clause > SQL_CLAUSE_GROUP_BY; } @@ -772,7 +773,8 @@ static SNodeList* getProjectList(const SNode* pNode) { static bool isTimeLineQuery(SNode* pStmt) { if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { - return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode); + return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || + (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode); } else if (QUERY_NODE_SET_OPERATOR == nodeType(pStmt)) { return TIME_LINE_GLOBAL == ((SSetOperator*)pStmt)->timeLineResMode; } else { @@ -791,7 +793,7 @@ static bool isGlobalTimeLineQuery(SNode* pStmt) { } static bool isTimeLineAlignedQuery(SNode* pStmt) { - SSelectStmt *pSelect = (SSelectStmt *)pStmt; + SSelectStmt* pSelect = (SSelectStmt*)pStmt; if (isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return true; } @@ -801,7 +803,7 @@ static bool 
isTimeLineAlignedQuery(SNode* pStmt) { if (QUERY_NODE_SELECT_STMT != nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return false; } - SSelectStmt *pSub = (SSelectStmt *)((STempTableNode*)pSelect->pFromTable)->pSubquery; + SSelectStmt* pSub = (SSelectStmt*)((STempTableNode*)pSelect->pFromTable)->pSubquery; if (nodesListMatch(pSelect->pPartitionByList, pSub->pPartitionByList)) { return true; } @@ -1394,7 +1396,7 @@ static bool isCountStar(SFunctionNode* pFunc) { } static int32_t rewriteCountStarAsCount1(STranslateContext* pCxt, SFunctionNode* pCount) { - int32_t code = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); if (NULL == pVal) { return TSDB_CODE_OUT_OF_MEMORY; @@ -1596,9 +1598,11 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } - if (pSelect->hasInterpFunc && (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { + if (pSelect->hasInterpFunc && + (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, - "%s ignoring null value options cannot be used when applying to multiple columns", pFunc->functionName); + "%s ignoring null value options cannot be used when applying to multiple columns", + pFunc->functionName); } if (NULL != pSelect->pWindow || NULL != pSelect->pGroupByList) { @@ -1636,7 +1640,8 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu } SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && - !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && !isTimeLineAlignedQuery(pCxt->pCurrStmt)) { + !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && + !isTimeLineAlignedQuery(pCxt->pCurrStmt)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, "%s function requires valid time series input", pFunc->functionName); } @@ -1706,8 +1711,8 @@ static int32_t translateForbidSysTableFunc(STranslateContext* pCxt, SFunctionNod return TSDB_CODE_SUCCESS; } - SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; - SNode* pTable = pSelect->pFromTable; + SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; + SNode* pTable = pSelect->pFromTable; if (NULL != pTable && QUERY_NODE_REAL_TABLE == nodeType(pTable) && TSDB_SYSTEM_TABLE == ((SRealTableNode*)pTable)->pMeta->tableType) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, pFunc->functionName); @@ -2296,7 +2301,8 @@ static EDealRes doCheckExprForGroupBy(SNode** pNode, void* pContext) { } } if (isScanPseudoColumnFunc(*pNode) || QUERY_NODE_COLUMN == nodeType(*pNode)) { - if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { + if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || + (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { return generateDealNodeErrMsg(pCxt, getGroupByErrorCode(pCxt), ((SExprNode*)(*pNode))->userAlias); } else { return rewriteColToSelectValFunc(pCxt, pNode); @@ -2391,14 +2397,14 @@ static int32_t checkHavingGroupBy(STranslateContext* pCxt, 
SSelectStmt* pSelect) if (NULL != pSelect->pHaving) { code = checkExprForGroupBy(pCxt, &pSelect->pHaving); } -/* - if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pProjectionList) { - code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); - } - if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { - code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pOrderByList); - } -*/ + /* + if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pProjectionList) { + code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); + } + if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { + code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pOrderByList); + } + */ return code; } @@ -2657,10 +2663,10 @@ static int32_t setTableCacheLastMode(STranslateContext* pCxt, SSelectStmt* pSele static EDealRes doTranslateTbName(SNode** pNode, void* pContext) { switch (nodeType(*pNode)) { case QUERY_NODE_FUNCTION: { - SFunctionNode *pFunc = (SFunctionNode *)*pNode; + SFunctionNode* pFunc = (SFunctionNode*)*pNode; if (FUNCTION_TYPE_TBNAME == pFunc->funcType) { - SRewriteTbNameContext *pCxt = (SRewriteTbNameContext*)pContext; - SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); + SRewriteTbNameContext* pCxt = (SRewriteTbNameContext*)pContext; + SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); if (NULL == pVal) { pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; return DEAL_RES_ERROR; @@ -2699,11 +2705,12 @@ static int32_t replaceTbName(STranslateContext* pCxt, SSelectStmt* pSelect) { } SRealTableNode* pTable = (SRealTableNode*)pSelect->pFromTable; - if (TSDB_CHILD_TABLE != pTable->pMeta->tableType && TSDB_NORMAL_TABLE != pTable->pMeta->tableType && TSDB_SYSTEM_TABLE != pTable->pMeta->tableType) { + if (TSDB_CHILD_TABLE != pTable->pMeta->tableType && TSDB_NORMAL_TABLE != pTable->pMeta->tableType && + TSDB_SYSTEM_TABLE != pTable->pMeta->tableType) { return TSDB_CODE_SUCCESS; } - SNode** pNode = NULL; + SNode** pNode = NULL; SRewriteTbNameContext pRewriteCxt = {0}; pRewriteCxt.pTbName = pTable->table.tableName; @@ -3110,7 +3117,8 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList code = scalarCalculateConstants(pCastFunc, &pCell->pNode); } if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) { - code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant"); + code = + generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant"); } else if (TSDB_CODE_SUCCESS != code) { code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch"); } @@ -3576,7 +3584,6 @@ static int32_t createDefaultEveryNode(STranslateContext* pCxt, SNode** pOutput) pEvery->isDuration = true; pEvery->literal = taosStrdup("1s"); - *pOutput = (SNode*)pEvery; return TSDB_CODE_SUCCESS; } @@ -3671,15 +3678,15 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) { static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelect) { pCxt->currClause = SQL_CLAUSE_PARTITION_BY; int32_t code = TSDB_CODE_SUCCESS; - + if (pSelect->pPartitionByList) { int8_t typeType = getTableTypeFromTableNode(pSelect->pFromTable); SNode* pPar = nodesListGetNode(pSelect->pPartitionByList, 0); - if (!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) && - 1 == pSelect->pPartitionByList->length && (QUERY_NODE_FUNCTION == nodeType(pPar) && 
FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { + if (!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) && 1 == pSelect->pPartitionByList->length && + (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { pSelect->timeLineResMode = TIME_LINE_MULTI; } - + code = translateExprList(pCxt, pSelect->pPartitionByList); } if (TSDB_CODE_SUCCESS == code) { @@ -3943,9 +3950,9 @@ static int32_t translateSetOperProject(STranslateContext* pCxt, SSetOperator* pS } snprintf(pRightExpr->aliasName, sizeof(pRightExpr->aliasName), "%s", pLeftExpr->aliasName); SNode* pProj = createSetOperProject(pSetOperator->stmtName, pLeft); - if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight) - && ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID - && ((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { + if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight) && + ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID && + ((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { ((SColumnNode*)pProj)->colId = PRIMARYKEY_TIMESTAMP_COL_ID; } if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pSetOperator->pProjectionList, pProj)) { @@ -5725,7 +5732,6 @@ static int32_t translateRestoreDnode(STranslateContext* pCxt, SRestoreComponentN return buildCmdMsg(pCxt, TDMT_MND_RESTORE_DNODE, (FSerializeFunc)tSerializeSRestoreDnodeReq, &restoreReq); } - static int32_t getSmaIndexDstVgId(STranslateContext* pCxt, const char* pDbName, const char* pTableName, int32_t* pVgId) { SVgroupInfo vg = {0}; @@ -5853,7 +5859,7 @@ static int32_t checkCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pS } static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) { - int32_t code = checkCreateSmaIndex(pCxt, pStmt); + int32_t code = checkCreateSmaIndex(pCxt, pStmt); pStmt->pReq = taosMemoryCalloc(1, sizeof(SMCreateSmaReq)); if (pStmt->pReq == NULL) code = TSDB_CODE_OUT_OF_MEMORY; if (TSDB_CODE_SUCCESS == code) { @@ -5867,13 +5873,15 @@ int32_t createIntervalFromCreateSmaIndexStmt(SCreateIndexStmt* pStmt, SInterval* pInterval->interval = ((SValueNode*)pStmt->pOptions->pInterval)->datum.i; pInterval->intervalUnit = ((SValueNode*)pStmt->pOptions->pInterval)->unit; pInterval->offset = NULL != pStmt->pOptions->pOffset ? ((SValueNode*)pStmt->pOptions->pOffset)->datum.i : 0; - pInterval->sliding = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval; - pInterval->slidingUnit = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit; + pInterval->sliding = + NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval; + pInterval->slidingUnit = + NULL != pStmt->pOptions->pSliding ? 
((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit; pInterval->precision = pStmt->pOptions->tsPrecision; return TSDB_CODE_SUCCESS; } -int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void ** pResRow) { +int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) { int32_t code = TSDB_CODE_SUCCESS; SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pQuery->pRoot; int64_t lastTs = 0; @@ -6041,7 +6049,7 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS toName(pCxt->pParseCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name); tNameGetFullDbName(&name, pReq->subDbName); tNameExtractFullName(&name, pReq->subStbName); - if(pStmt->pQuery != NULL) { + if (pStmt->pQuery != NULL) { code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL); } } else if ('\0' != pStmt->subDbName[0]) { @@ -6096,11 +6104,12 @@ static EDealRes checkColumnTagsInCond(SNode* pNode, void* pContext) { addTagList(&pCxt->pTags, nodesCloneNode(pNode)); } } - + return DEAL_RES_CONTINUE; } -static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, SNodeList** ppProjection) { +static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, + SNodeList** ppProjection) { SBuildTopicContext colCxt = {.colExists = false, .colNotFound = false, .pMeta = pMeta, .pTags = NULL}; nodesWalkExprPostOrder(pStmt->pWhere, checkColumnTagsInCond, &colCxt); if (colCxt.colNotFound) { @@ -6110,18 +6119,18 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* nodesDestroyList(colCxt.pTags); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Columns are forbidden in where clause"); } - if (NULL == colCxt.pTags) { // put one column to select -// for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { - SSchema* column = &pMeta->schema[0]; - SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); - if (NULL == col) { - return TSDB_CODE_OUT_OF_MEMORY; - } - strcpy(col->colName, column->name); - strcpy(col->node.aliasName, col->colName); - strcpy(col->node.userAlias, col->colName); - addTagList(&colCxt.pTags, (SNode*)col); -// } + if (NULL == colCxt.pTags) { // put one column to select + // for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { + SSchema* column = &pMeta->schema[0]; + SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == col) { + return TSDB_CODE_OUT_OF_MEMORY; + } + strcpy(col->colName, column->name); + strcpy(col->node.aliasName, col->colName); + strcpy(col->node.userAlias, col->colName); + addTagList(&colCxt.pTags, (SNode*)col); + // } } *ppProjection = colCxt.pTags; @@ -6129,13 +6138,13 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* } static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt* pStmt, SNode** pSelect) { - SParseContext* pParCxt = pCxt->pParseCxt; - SRequestConnInfo connInfo = {.pTrans = pParCxt->pTransporter, - .requestId = pParCxt->requestId, + SParseContext* pParCxt = pCxt->pParseCxt; + SRequestConnInfo connInfo = {.pTrans = pParCxt->pTransporter, + .requestId = pParCxt->requestId, .requestObjRefId = pParCxt->requestRid, .mgmtEps = pParCxt->mgmtEpSet}; - SName name; - STableMeta* pMeta = NULL; + SName name; + STableMeta* pMeta = NULL; int32_t code = getTableMetaImpl(pCxt, toName(pParCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name), &pMeta); if 
(code) { taosMemoryFree(pMeta); @@ -6144,7 +6153,7 @@ static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt if (TSDB_SUPER_TABLE != pMeta->tableType) { taosMemoryFree(pMeta); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Only supertable table can be used"); - } + } SNodeList* pProjection = NULL; code = checkCollectTopicTags(pCxt, pStmt, pMeta, &pProjection); @@ -6542,7 +6551,8 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "SUBTABLE expression must be of VARCHAR type"); } - if (NULL != pSelect->pSubtable && 0 == LIST_LENGTH(pSelect->pPartitionByList) && subtableExprHasColumnOrPseudoColumn(pSelect->pSubtable)) { + if (NULL != pSelect->pSubtable && 0 == LIST_LENGTH(pSelect->pPartitionByList) && + subtableExprHasColumnOrPseudoColumn(pSelect->pSubtable)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "SUBTABLE expression must not has column when no partition by clause"); } @@ -6895,28 +6905,28 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta if (NULL == col) { return TSDB_CODE_OUT_OF_MEMORY; } - + strcpy(col->tableAlias, pTable); strcpy(col->colName, pMeta->schema[0].name); SNodeList* pParamterList = nodesMakeList(); if (NULL == pParamterList) { - nodesDestroyNode((SNode *)col); + nodesDestroyNode((SNode*)col); return TSDB_CODE_OUT_OF_MEMORY; } - - int32_t code = nodesListStrictAppend(pParamterList, (SNode *)col); + + int32_t code = nodesListStrictAppend(pParamterList, (SNode*)col); if (code) { - nodesDestroyNode((SNode *)col); + nodesDestroyNode((SNode*)col); nodesDestroyList(pParamterList); return code; } - + SNode* pFunc = (SNode*)createFunction("last", pParamterList); if (NULL == pFunc) { nodesDestroyList(pParamterList); return TSDB_CODE_OUT_OF_MEMORY; } - + SNodeList* pProjectionList = nodesMakeList(); if (NULL == pProjectionList) { nodesDestroyList(pParamterList); @@ -6928,8 +6938,8 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta nodesDestroyList(pProjectionList); return code; } - - code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt **)pQuery); + + code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt**)pQuery); if (code) { nodesDestroyList(pProjectionList); return code; @@ -6967,14 +6977,14 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt if (TSDB_CODE_SUCCESS == code && pStmt->pOptions->fillHistory) { SRealTableNode* pTable = (SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable); code = createLastTsSelectStmt(pTable->table.dbName, pTable->table.tableName, pTable->pMeta, &pStmt->pPrevQuery); -/* - if (TSDB_CODE_SUCCESS == code) { - STranslateContext cxt = {0}; - int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); - code = translateQuery(&cxt, pStmt->pPrevQuery); - destroyTranslateContext(&cxt); - } -*/ + /* + if (TSDB_CODE_SUCCESS == code) { + STranslateContext cxt = {0}; + int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); + code = translateQuery(&cxt, pStmt->pPrevQuery); + destroyTranslateContext(&cxt); + } + */ } taosMemoryFree(pMeta); return code; @@ -7069,7 +7079,7 @@ static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow)) { return 
code; } - + SIntervalWindowNode* pWindow = (SIntervalWindowNode*)pSelect->pWindow; pInterval->interval = ((SValueNode*)pWindow->pInterval)->datum.i; pInterval->intervalUnit = ((SValueNode*)pWindow->pInterval)->unit; @@ -7077,16 +7087,16 @@ static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* pInterval->sliding = (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->datum.i : pInterval->interval); pInterval->slidingUnit = (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->unit : pInterval->intervalUnit); - pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision; + pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision; return code; } int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) { SCreateStreamStmt* pStmt = (SCreateStreamStmt*)pQuery->pRoot; - STranslateContext cxt = {0}; - SInterval interval = {0}; - int64_t lastTs = 0; + STranslateContext cxt = {0}; + SInterval interval = {0}; + int64_t lastTs = 0; int32_t code = initTranslateContext(pParseCxt, NULL, &cxt); if (TSDB_CODE_SUCCESS == code) { @@ -7121,7 +7131,6 @@ int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void return code; } - static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pStmt) { SMDropStreamReq dropReq = {0}; SName name; @@ -7152,7 +7161,7 @@ static int32_t translateResumeStream(STranslateContext* pCxt, SResumeStreamStmt* static int32_t readFromFile(char* pName, int32_t* len, char** buf) { int64_t filesize = 0; - if (taosStatFile(pName, &filesize, NULL) < 0) { + if (taosStatFile(pName, &filesize, NULL, NULL) < 0) { return TAOS_SYSTEM_ERROR(errno); } @@ -7246,7 +7255,7 @@ static int32_t translateGrantTagCond(STranslateContext* pCxt, SGrantStmt* pStmt, } } - int32_t code = createRealTableForGrantTable(pStmt, &pTable); + int32_t code = createRealTableForGrantTable(pStmt, &pTable); if (TSDB_CODE_SUCCESS == code) { SName name; code = getTableMetaImpl(pCxt, toName(pCxt->pParseCxt->acctId, pTable->table.dbName, pTable->table.tableName, &name), @@ -7806,7 +7815,8 @@ static SNodeList* createProjectCols(int32_t ncols, const char* const pCols[]) { return pProjections; } -static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { +static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt) { SSelectStmt* pSelect = (SSelectStmt*)nodesMakeNode(QUERY_NODE_SELECT_STMT); if (NULL == pSelect) { return TSDB_CODE_OUT_OF_MEMORY; @@ -7829,9 +7839,8 @@ static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, S return TSDB_CODE_SUCCESS; } - static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTable, int32_t numOfProjs, - const char* const pProjCol[], SSelectStmt** pStmt) { + const char* const pProjCol[], SSelectStmt** pStmt) { SNodeList* pProjectionList = NULL; if (numOfProjs >= 0) { pProjectionList = createProjectCols(numOfProjs, pProjCol); @@ -7843,13 +7852,15 @@ static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTabl return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt); } -static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { +static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, + 
SSelectStmt** pStmt) { return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt); } static int32_t createSelectStmtForShow(ENodeType showType, SSelectStmt** pStmt) { const SSysTableShowAdapter* pShow = &sysTableShowAdapter[showType - SYSTABLE_SHOW_TYPE_OFFSET]; - return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, pStmt); + return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, + pStmt); } static int32_t createSelectStmtForShowTableDist(SShowTableDistributedStmt* pStmt, SSelectStmt** pOutput) { @@ -7987,8 +7998,8 @@ static int32_t createShowTableTagsProjections(SNodeList** pProjections, SNodeLis static int32_t rewriteShowStableTags(STranslateContext* pCxt, SQuery* pQuery) { SShowTableTagsStmt* pShow = (SShowTableTagsStmt*)pQuery->pRoot; SSelectStmt* pSelect = NULL; - int32_t code = createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal, - -1, NULL, &pSelect); + int32_t code = createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, + ((SValueNode*)pShow->pTbName)->literal, -1, NULL, &pSelect); if (TSDB_CODE_SUCCESS == code) { code = createShowTableTagsProjections(&pSelect->pProjectionList, &pShow->pTags); } diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c index bd15567c87..051106b99d 100644 --- a/source/libs/sync/src/syncRaftStore.c +++ b/source/libs/sync/src/syncRaftStore.c @@ -42,7 +42,7 @@ int32_t raftStoreReadFile(SSyncNode *pNode) { const char *file = pNode->raftStorePath; SRaftStore *pStore = &pNode->raftStore; - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { sInfo("vgId:%d, raft store file:%s not exist, use default value", pNode->vgId, file); pStore->currentTerm = 0; pStore->voteFor.addr = 0; diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 01d23a7e96..2acdd975e5 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -53,7 +53,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); TdFilePtr pFile = taosOpenFile(fnameStr, TD_FILE_READ | TD_FILE_WRITE); if (pFile == NULL) { @@ -304,7 +304,7 @@ int walRepairLogFileTs(SWal* pWal, bool* updateMeta) { walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); int32_t mtime = 0; - if (taosStatFile(fnameStr, NULL, &mtime) < 0) { + if (taosStatFile(fnameStr, NULL, &mtime, NULL) < 0) { terrno = TAOS_SYSTEM_ERROR(errno); wError("vgId:%d, failed to stat file due to %s, file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); return -1; @@ -353,7 +353,7 @@ int walTrimIdxFile(SWal* pWal, int32_t fileIdx) { walBuildIdxName(pWal, pFileInfo->firstVer, fnameStr); int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); int64_t records = TMAX(0, pFileInfo->lastVer - pFileInfo->firstVer + 1); int64_t lastEndOffset = records * sizeof(SWalIdxEntry); @@ -436,7 +436,7 @@ int walCheckAndRepairMeta(SWal* pWal) { SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, fileIdx); walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); - int32_t code = taosStatFile(fnameStr, &fileSize, NULL); + int32_t code = taosStatFile(fnameStr, &fileSize, NULL, NULL); if (code < 0) { terrno = 
TAOS_SYSTEM_ERROR(errno); wError("failed to stat file since %s. file:%s", terrstr(), fnameStr); @@ -522,7 +522,7 @@ int walCheckAndRepairIdxFile(SWal* pWal, int32_t fileIdx) { walBuildLogName(pWal, pFileInfo->firstVer, fLogNameStr); int64_t fileSize = 0; - if (taosStatFile(fnameStr, &fileSize, NULL) < 0 && errno != ENOENT) { + if (taosStatFile(fnameStr, &fileSize, NULL, NULL) < 0 && errno != ENOENT) { wError("vgId:%d, failed to stat file due to %s. file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); terrno = TAOS_SYSTEM_ERROR(errno); return -1; @@ -935,7 +935,7 @@ int walLoadMeta(SWal* pWal) { walBuildMetaName(pWal, metaVer, fnameStr); // read metafile int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); if (fileSize == 0) { (void)taosRemoveFile(fnameStr); wDebug("vgId:%d, wal find empty meta ver %d", pWal->cfg.vgId, metaVer); diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index dd670595f0..c4309b2c55 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -191,7 +191,7 @@ int32_t taosRenameFile(const char *oldName, const char *newName) { #endif } -int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { +int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime) { #ifdef WINDOWS struct _stati64 fileStat; int32_t code = _stati64(path, &fileStat); @@ -211,6 +211,10 @@ int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { *mtime = fileStat.st_mtime; } + if (atime != NULL) { + *atime = fileStat.st_mtime; + } + return 0; } int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno) { @@ -540,7 +544,7 @@ int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime) { #ifdef WINDOWS struct __stat64 fileStat; - int32_t code = _fstat64(pFile->fd, &fileStat); + int32_t code = _fstat64(pFile->fd, &fileStat); #else struct stat fileStat; int32_t code = fstat(pFile->fd, &fileStat); @@ -897,17 +901,17 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName) { goto cmp_end; } - dstFp = gzdopen(pFile->fd, "wb6f"); - if (dstFp == NULL) { - ret = -3; - taosCloseFile(&pFile); - goto cmp_end; - } + dstFp = gzdopen(pFile->fd, "wb6f"); + if (dstFp == NULL) { + ret = -3; + taosCloseFile(&pFile); + goto cmp_end; + } - while (!feof(pSrcFile->fp)) { - len = (int32_t)fread(data, 1, compressSize, pSrcFile->fp); - (void)gzwrite(dstFp, data, len); - } + while (!feof(pSrcFile->fp)) { + len = (int32_t)fread(data, 1, compressSize, pSrcFile->fp); + (void)gzwrite(dstFp, data, len); + } cmp_end: if (pSrcFile) { diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index de7ad848ed..4a15b5b976 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -17,8 +17,8 @@ #include "tlog.h" #include "os.h" #include "tconfig.h" -#include "tjson.h" #include "tglobal.h" +#include "tjson.h" #define LOG_MAX_LINE_SIZE (10024) #define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3) @@ -74,12 +74,12 @@ static SLogObj tsLogObj = {.fileNum = 1}; static int64_t tsAsyncLogLostLines = 0; static int32_t tsDaylightActive; /* Currently in daylight saving time. 
*/ -bool tsLogEmbedded = 0; -bool tsAsyncLog = true; +bool tsLogEmbedded = 0; +bool tsAsyncLog = true; #ifdef ASSERT_NOT_CORE -bool tsAssert = false; +bool tsAssert = false; #else -bool tsAssert = true; +bool tsAssert = true; #endif int32_t tsNumOfLogLines = 10000000; int32_t tsLogKeepDays = 0; @@ -160,7 +160,7 @@ int32_t taosInitSlowLog() { tsLogObj.slowHandle = taosLogBuffNew(LOG_SLOW_BUF_SIZE); if (tsLogObj.slowHandle == NULL) return -1; - + taosUmaskFile(0); tsLogObj.slowHandle->pFile = taosOpenFile(fullName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); if (tsLogObj.slowHandle->pFile == NULL) { @@ -403,13 +403,13 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { strcpy(name, fn); strcat(name, ".0"); } - bool log0Exist = taosStatFile(name, NULL, &logstat0_mtime) >= 0; + bool log0Exist = taosStatFile(name, NULL, &logstat0_mtime, NULL) >= 0; if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { strcpy(name, fn); strcat(name, ".1"); } - bool log1Exist = taosStatFile(name, NULL, &logstat1_mtime) >= 0; + bool log1Exist = taosStatFile(name, NULL, &logstat1_mtime, NULL) >= 0; // if none of the log files exist, open 0, if both exists, open the old one if (!log0Exist && !log1Exist) { @@ -576,7 +576,7 @@ void taosPrintSlowLog(const char *format, ...) { } else { taosWriteFile(tsLogObj.slowHandle->pFile, buffer, len); } - + taosMemoryFree(buffer); } @@ -769,12 +769,12 @@ static void taosWriteLog(SLogBuff *pLogBuf) { static void *taosAsyncOutputLog(void *param) { SLogBuff *pLogBuf = (SLogBuff *)tsLogObj.logHandle; SLogBuff *pSlowBuf = (SLogBuff *)tsLogObj.slowHandle; - + setThreadName("log"); int32_t count = 0; int32_t updateCron = 0; int32_t writeInterval = 0; - + while (1) { writeInterval = TMIN(pLogBuf->writeInterval, pSlowBuf->writeInterval); count += writeInterval; @@ -834,12 +834,12 @@ bool taosAssertDebug(bool condition, const char *file, int32_t line, const char return true; } -void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, void *sigInfo) { +void taosLogCrashInfo(char *nodeType, char *pMsg, int64_t msgLen, int signum, void *sigInfo) { const char *flags = "UTL FATAL "; ELogLevel level = DEBUG_FATAL; int32_t dflag = 255; - char filepath[PATH_MAX] = {0}; - TdFilePtr pFile = NULL; + char filepath[PATH_MAX] = {0}; + TdFilePtr pFile = NULL; if (pMsg && msgLen > 0) { snprintf(filepath, sizeof(filepath), "%s%s.%sCrashLog", tsLogDir, TD_DIRSEP, nodeType); @@ -856,16 +856,16 @@ void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, vo int64_t writeSize = taosWriteFile(pFile, &msgLen, sizeof(msgLen)); if (sizeof(msgLen) != writeSize) { taosUnLockFile(pFile); - taosPrintLog(flags, level, dflag, "failed to write len to file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", - filepath, pFile, writeSize, sizeof(msgLen), terrstr()); + taosPrintLog(flags, level, dflag, "failed to write len to file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", + filepath, pFile, writeSize, sizeof(msgLen), terrstr()); goto _return; } writeSize = taosWriteFile(pFile, pMsg, msgLen); if (msgLen != writeSize) { taosUnLockFile(pFile); - taosPrintLog(flags, level, dflag, "failed to write file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", - filepath, pFile, writeSize, msgLen, terrstr()); + taosPrintLog(flags, level, dflag, "failed to write file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", + filepath, pFile, writeSize, msgLen, terrstr()); goto _return; } @@ -883,7 +883,7 @@ _return: taosPrintTrace(flags, level, dflag, 4); #elif !defined(WINDOWS) 
taosPrintLog(flags, level, dflag, "sender PID:%d cmdline:%s", ((siginfo_t *)sigInfo)->si_pid, - taosGetCmdlineByPID(((siginfo_t *)sigInfo)->si_pid)); + taosGetCmdlineByPID(((siginfo_t *)sigInfo)->si_pid)); taosPrintTrace(flags, level, dflag, 3); #else taosPrintTrace(flags, level, dflag, 8); @@ -892,17 +892,17 @@ _return: taosMemoryFree(pMsg); } -void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* pFd) { +void taosReadCrashInfo(char *filepath, char **pMsg, int64_t *pMsgLen, TdFilePtr *pFd) { const char *flags = "UTL FATAL "; ELogLevel level = DEBUG_FATAL; int32_t dflag = 255; TdFilePtr pFile = NULL; bool truncateFile = false; - char* buf = NULL; + char *buf = NULL; if (NULL == *pFd) { int64_t filesize = 0; - if (taosStatFile(filepath, &filesize, NULL) < 0) { + if (taosStatFile(filepath, &filesize, NULL, NULL) < 0) { if (ENOENT == errno) { return; } @@ -916,7 +916,7 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* return; } - pFile = taosOpenFile(filepath, TD_FILE_READ|TD_FILE_WRITE); + pFile = taosOpenFile(filepath, TD_FILE_READ | TD_FILE_WRITE); if (pFile == NULL) { if (ENOENT == errno) { return; @@ -926,7 +926,7 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* taosPrintLog(flags, level, dflag, "failed to open file:%s since %s", filepath, terrstr()); return; } - + taosLockFile(pFile); } else { pFile = *pFd; @@ -937,8 +937,8 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* if (sizeof(msgLen) != readSize) { truncateFile = true; if (readSize < 0) { - taosPrintLog(flags, level, dflag, "failed to read len from file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", - filepath, pFile, readSize, sizeof(msgLen), terrstr()); + taosPrintLog(flags, level, dflag, "failed to read len from file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", + filepath, pFile, readSize, sizeof(msgLen), terrstr()); } goto _return; } @@ -948,12 +948,12 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* taosPrintLog(flags, level, dflag, "failed to malloc buf, size:%" PRId64, msgLen); goto _return; } - + readSize = taosReadFile(pFile, buf, msgLen); if (msgLen != readSize) { truncateFile = true; - taosPrintLog(flags, level, dflag, "failed to read file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", - filepath, pFile, readSize, msgLen, terrstr()); + taosPrintLog(flags, level, dflag, "failed to read file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", filepath, + pFile, readSize, msgLen, terrstr()); goto _return; } @@ -981,7 +981,7 @@ void taosReleaseCrashLogFile(TdFilePtr pFile, bool truncateFile) { if (truncateFile) { taosFtruncateFile(pFile, 0); } - + taosUnLockFile(pFile); taosCloseFile(&pFile); } diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index e9dd067ac4..860622ea18 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -18,9 +18,9 @@ #define _GNU_SOURCE #define _XOPEN_SOURCE #define _DEFAULT_SOURCE -#include "shellInt.h" -#include "shellAuto.h" #include "geosWrapper.h" +#include "shellAuto.h" +#include "shellInt.h" static bool shellIsEmptyCommand(const char *cmd); static int32_t shellRunSingleCommand(char *command); @@ -41,9 +41,9 @@ static bool shellIsCommentLine(char *line); static void shellSourceFile(const char *file); static void shellGetGrantInfo(); -static void shellCleanup(void *arg); -static void *shellCancelHandler(void *arg); -static void *shellThreadLoop(void *arg); +static void 
shellCleanup(void *arg); +static void *shellCancelHandler(void *arg); +static void *shellThreadLoop(void *arg); bool shellIsEmptyCommand(const char *cmd) { for (char c = *cmd++; c != 0; c = *cmd++) { @@ -66,7 +66,7 @@ int32_t shellRunSingleCommand(char *command) { if (shellRegexMatch(command, "^[\t ]*clear[ \t;]*$", REG_EXTENDED | REG_ICASE)) { #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-result" - system("clear"); + system("clear"); #pragma GCC diagnostic pop return 0; } @@ -142,8 +142,8 @@ int32_t shellRunCommand(char *command, bool recordHistory) { return 0; } - // add help or help; - if(strncasecmp(command, "help;", 5) == 0) { + // add help or help; + if (strncasecmp(command, "help;", 5) == 0) { showHelp(); return 0; } @@ -223,14 +223,14 @@ void shellRunSingleCommandImp(char *command) { } // pre string - char * pre = "Query OK"; + char *pre = "Query OK"; if (shellRegexMatch(command, "^\\s*delete\\s*from\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Delete OK"; - } else if(shellRegexMatch(command, "^\\s*insert\\s*into\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*insert\\s*into\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Insert OK"; - } else if(shellRegexMatch(command, "^\\s*create\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*create\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Create OK"; - } else if(shellRegexMatch(command, "^\\s*drop\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*drop\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Drop OK"; } @@ -295,7 +295,7 @@ char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) { if (taosLocalTime(&tt, &ptm, buf) == NULL) { return buf; } - size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm); + size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm); if (precision == TSDB_TIME_PRECISION_NANO) { sprintf(buf + pos, ".%09d", ms); @@ -387,22 +387,20 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: - case TSDB_DATA_TYPE_JSON: - { - int32_t bufIndex = 0; - for (int32_t i = 0; i < length; i++) { + case TSDB_DATA_TYPE_JSON: { + int32_t bufIndex = 0; + for (int32_t i = 0; i < length; i++) { + buf[bufIndex] = val[i]; + bufIndex++; + if (val[i] == '\"') { buf[bufIndex] = val[i]; bufIndex++; - if (val[i] == '\"') { - buf[bufIndex] = val[i]; - bufIndex++; - } } - buf[bufIndex] = 0; - - taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); } - break; + buf[bufIndex] = 0; + + taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); + } break; case TSDB_DATA_TYPE_GEOMETRY: shellDumpHexValue(buf, val, length); taosFprintfFile(pFile, "%s", buf); @@ -535,12 +533,10 @@ void shellPrintString(const char *str, int32_t width) { if (width == 0) { printf("%s", str); - } - else if (len > width) { + } else if (len > width) { if (width <= 3) { printf("%.*s.", width - 1, str); - } - else { + } else { printf("%.*s...", width - 3, str); } } else { @@ -549,7 +545,7 @@ void shellPrintString(const char *str, int32_t width) { } void shellPrintGeometry(const unsigned char *val, int32_t length, int32_t width) { - if (length == 0) { //empty value + if (length == 0) { // empty value shellPrintString("", width); return; } @@ -565,7 +561,7 @@ void shellPrintGeometry(const unsigned char *val, int32_t length, int32_t width) char *outputWKT = NULL; code = doAsText(val, length, &outputWKT); if (code != TSDB_CODE_SUCCESS) { - 
shellPrintString(getThreadLocalGeosCtx()->errMsg, width); //should NOT happen + shellPrintString(getThreadLocalGeosCtx()->errMsg, width); // should NOT happen return; } @@ -612,27 +608,26 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t break; case TSDB_DATA_TYPE_FLOAT: if (tsEnableScience) { - printf("%*.7e",width,GET_FLOAT_VAL(val)); + printf("%*.7e", width, GET_FLOAT_VAL(val)); } else { n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.7f", width, GET_FLOAT_VAL(val)); if (n > SHELL_FLOAT_WIDTH) { - - printf("%*.7e", width,GET_FLOAT_VAL(val)); + printf("%*.7e", width, GET_FLOAT_VAL(val)); } else { - printf("%s", buf); + printf("%s", buf); } } break; case TSDB_DATA_TYPE_DOUBLE: if (tsEnableScience) { - snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15e", width,GET_DOUBLE_VAL(val)); + snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15e", width, GET_DOUBLE_VAL(val)); printf("%s", buf); } else { n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15f", width, GET_DOUBLE_VAL(val)); if (n > SHELL_DOUBLE_WIDTH) { - printf("%*.15e", width, GET_DOUBLE_VAL(val)); + printf("%*.15e", width, GET_DOUBLE_VAL(val)); } else { - printf("%*s", width,buf); + printf("%*s", width, buf); } } break; @@ -905,7 +900,7 @@ void shellReadHistory() { TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_READ | TD_FILE_STREAM); if (pFile == NULL) return; - char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); + char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); int32_t read_size = 0; while ((read_size = taosGetsFile(pFile, TSDB_MAX_ALLOWED_SQL_LEN, line)) != -1) { line[read_size - 1] = '\0'; @@ -922,8 +917,8 @@ void shellReadHistory() { taosMemoryFreeClear(line); taosCloseFile(&pFile); int64_t file_size; - if (taosStatFile(pHistory->file, &file_size, NULL) == 0 && file_size > SHELL_MAX_COMMAND_SIZE) { - TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_TRUNC); + if (taosStatFile(pHistory->file, &file_size, NULL, NULL) == 0 && file_size > SHELL_MAX_COMMAND_SIZE) { + TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_TRUNC); if (pFile == NULL) return; int32_t endIndex = pHistory->hstart; if (endIndex != 0) { @@ -945,7 +940,7 @@ void shellReadHistory() { void shellWriteHistory() { SShellHistory *pHistory = &shell.history; if (pHistory->hend == pHistory->hstart) return; - TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_APPEND); + TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_APPEND); if (pFile == NULL) return; for (int32_t i = pHistory->hstart; i != pHistory->hend;) { @@ -991,7 +986,7 @@ void shellSourceFile(const char *file) { tstrncpy(fullname, file, PATH_MAX); } - sprintf(sourceFileCommand, "source %s;",fullname); + sprintf(sourceFileCommand, "source %s;", fullname); shellRecordCommandToHistory(sourceFileCommand); TdFilePtr pFile = taosOpenFile(fullname, TD_FILE_READ | TD_FILE_STREAM); @@ -1001,7 +996,7 @@ void shellSourceFile(const char *file) { return; } - char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); + char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); while ((read_len = taosGetsFile(pFile, TSDB_MAX_ALLOWED_SQL_LEN, line)) != -1) { if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue; line[--read_len] = '\0'; @@ -1044,7 +1039,8 @@ void shellGetGrantInfo() { int32_t code = taos_errno(tres); if (code != TSDB_CODE_SUCCESS) { - if (code != 
TSDB_CODE_OPS_NOT_SUPPORT && code != TSDB_CODE_MND_NO_RIGHTS && code != TSDB_CODE_PAR_PERMISSION_DENIED) { + if (code != TSDB_CODE_OPS_NOT_SUPPORT && code != TSDB_CODE_MND_NO_RIGHTS && + code != TSDB_CODE_PAR_PERMISSION_DENIED) { fprintf(stderr, "Failed to check Server Edition, Reason:0x%04x:%s\r\n\r\n", code, taos_errstr(tres)); } return; @@ -1080,7 +1076,8 @@ void shellGetGrantInfo() { } else if (strcmp(expiretime, "unlimited") == 0) { fprintf(stdout, "Server is Enterprise %s Edition, %s and will never expire.\r\n", serverVersion, sinfo); } else { - fprintf(stdout, "Server is Enterprise %s Edition, %s and will expire at %s.\r\n", serverVersion, sinfo, expiretime); + fprintf(stdout, "Server is Enterprise %s Edition, %s and will expire at %s.\r\n", serverVersion, sinfo, + expiretime); } taos_free_result(tres); @@ -1123,9 +1120,9 @@ void *shellCancelHandler(void *arg) { #ifdef WEBSOCKET } #endif - #ifdef WINDOWS +#ifdef WINDOWS printf("\n%s", shell.info.promptHeader); - #endif +#endif } return NULL; @@ -1165,8 +1162,7 @@ void *shellThreadLoop(void *arg) { } int32_t shellExecute() { - printf(shell.info.clientVersion, shell.info.cusName, - taos_get_client_info(), shell.info.cusName); + printf(shell.info.clientVersion, shell.info.cusName, taos_get_client_info(), shell.info.cusName); fflush(stdout); SShellArgs *pArgs = &shell.args; @@ -1233,13 +1229,13 @@ int32_t shellExecute() { taosSetSignal(SIGTERM, shellQueryInterruptHandler); taosSetSignal(SIGHUP, shellQueryInterruptHandler); taosSetSignal(SIGINT, shellQueryInterruptHandler); - + #ifdef WEBSOCKET if (!shell.args.restful && !shell.args.cloud) { #endif #ifndef WINDOWS printfIntroduction(); -#endif +#endif shellGetGrantInfo(); #ifdef WEBSOCKET } diff --git a/utils/test/c/tmqDemo.c b/utils/test/c/tmqDemo.c index ce069c2b05..64f536433e 100644 --- a/utils/test/c/tmqDemo.c +++ b/utils/test/c/tmqDemo.c @@ -221,7 +221,7 @@ int64_t getDirectorySize(char* dir) { totalSize += subDirSize; } else if (0 == strcmp(strchr(fileName, '.'), ".log")) { // only calc .log file size, and not include .idx file int64_t file_size = 0; - taosStatFile(subdir, &file_size, NULL); + taosStatFile(subdir, &file_size, NULL, NULL); totalSize += file_size; } } @@ -702,4 +702,3 @@ int main(int32_t argc, char* argv[]) { taosCloseFile(&g_fp); return 0; } - From 9e4da6c089b5db3f08716961983053476c207206 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 7 Aug 2023 17:14:58 +0800 Subject: [PATCH 06/81] s3/config: parsing s3 configuration --- source/common/src/tglobal.c | 41 +++++++++++++++++++++++---- source/dnode/vnode/src/vnd/vnodeCos.c | 8 +++--- 2 files changed, 39 insertions(+), 10 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 1f6d0800a5..fbc98715f0 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -236,8 +236,9 @@ bool tsFilterScalarMode = false; int32_t tsKeepTimeOffset = 0; // latency of data migration char tsS3Endpoint[TSDB_FQDN_LEN] = ""; -char tsS3AcessKeyId[TSDB_FQDN_LEN] = ""; -char tsS3AcessKeySecret[TSDB_FQDN_LEN] = ""; +char tsS3AccessKey[TSDB_FQDN_LEN] = ""; +char tsS3AccessKeyId[TSDB_FQDN_LEN] = ""; +char tsS3AccessKeySecret[TSDB_FQDN_LEN] = ""; char tsS3BucketName[TSDB_FQDN_LEN] = ""; char tsS3AppId[TSDB_FQDN_LEN] = ""; int8_t tsS3Enabled = false; @@ -263,6 +264,35 @@ int32_t taosSetTfsCfg(SConfig *pCfg) { int32_t taosSetTfsCfg(SConfig *pCfg); #endif +int32_t taosSetS3Cfg(SConfig *pCfg) { + tstrncpy(tsS3AccessKey, cfgGetItem(pCfg, "s3Accesskey")->str, TSDB_FQDN_LEN); + char *colon = 
strchr(tsS3AccessKey, ':'); + if (!colon) { + uError("invalid access key:%s", tsS3AccessKey); + return -1; + } + *colon = '\0'; + tstrncpy(tsS3AccessKeyId, tsS3AccessKey, TSDB_FQDN_LEN); + tstrncpy(tsS3AccessKeySecret, colon + 1, TSDB_FQDN_LEN); + tstrncpy(tsS3Endpoint, cfgGetItem(pCfg, "s3Endpoint")->str, TSDB_FQDN_LEN); + tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); + char *cos = strstr(tsS3Endpoint, "cos."); + if (cos) { + char *appid = strrchr(tsS3BucketName, '-'); + if (!appid) { + uError("failed to locate appid in bucket:%s", tsS3BucketName); + return -1; + } else { + tstrncpy(tsS3AppId, appid + 1, TSDB_FQDN_LEN); + } + } + if (tsS3BucketName[0] != '<' && tsDiskCfgNum > 1) { + tsS3Enabled = true; + } + + return 0; +} + struct SConfig *taosGetCfg() { return tsCfg; } @@ -582,6 +612,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddString(pCfg, "s3Accesskey", tsS3AccessKey, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddString(pCfg, "s3Endpoint", tsS3Endpoint, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER) != 0) return -1; GRANT_CFG_ADD; @@ -972,8 +1004,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32; tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32; - tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); - GRANT_CFG_GET; return 0; } @@ -1298,8 +1328,6 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? 
defaultFirstEp : pFirstEpItem->str, &firstEp); snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port); cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype); - } else if (strcasecmp("s3BucketName", name) == 0) { - tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); } else if (strcasecmp("sDebugFlag", name) == 0) { sDebugFlag = cfgGetItem(pCfg, "sDebugFlag")->i32; } else if (strcasecmp("smaDebugFlag", name) == 0) { @@ -1498,6 +1526,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile if (taosSetServerCfg(tsCfg)) return -1; if (taosSetReleaseCfg(tsCfg)) return -1; if (taosSetTfsCfg(tsCfg) != 0) return -1; + if (taosSetS3Cfg(tsCfg) != 0) return -1; } taosSetSystemCfg(tsCfg); diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index bac38f7c35..a40e046972 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -7,8 +7,8 @@ #include "cos_log.h" extern char tsS3Endpoint[]; -extern char tsS3AcessKeyId[]; -extern char tsS3AcessKeySecret[]; +extern char tsS3AccessKeyId[]; +extern char tsS3AccessKeySecret[]; extern char tsS3BucketName[]; extern char tsS3AppId[]; @@ -41,8 +41,8 @@ static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) { cos_config_t *config = options->config; cos_str_set(&config->endpoint, tsS3Endpoint); - cos_str_set(&config->access_key_id, tsS3AcessKeyId); - cos_str_set(&config->access_key_secret, tsS3AcessKeySecret); + cos_str_set(&config->access_key_id, tsS3AccessKeyId); + cos_str_set(&config->access_key_secret, tsS3AccessKeySecret); cos_str_set(&config->appid, tsS3AppId); config->is_cname = is_cname; From 1290f529daf34cb3cfd6fa3cff92b43e94694a10 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 7 Aug 2023 17:17:43 +0800 Subject: [PATCH 07/81] cos/example: turn head object on --- contrib/test/cos/main.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/contrib/test/cos/main.c b/contrib/test/cos/main.c index faaceee2e3..7e5e7c8c8b 100644 --- a/contrib/test/cos/main.c +++ b/contrib/test/cos/main.c @@ -530,7 +530,12 @@ void test_head_object() { s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); print_headers(resp_headers); if (cos_status_is_ok(s)) { - printf("head object succeeded\n"); + long size = 0; + char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH); + if (content_length_str != NULL) { + size = atol(content_length_str); + } + printf("head object succeeded: %ld\n", size); } else { printf("head object failed\n"); } @@ -3045,7 +3050,7 @@ int main(int argc, char *argv[]) { // test_object(); // test_put_object_with_limit(); // test_get_object_with_limit(); - // test_head_object(); + test_head_object(); // test_gen_object_url(); // test_list_objects(); // test_list_directory(); From 16e015253b2c3f01981914f34743794e1d203e94 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 10:10:25 +0800 Subject: [PATCH 08/81] s3/mxml: remove os external dependency --- contrib/CMakeLists.txt | 31 ++++++++++++++++++++++++++++--- contrib/test/cos/CMakeLists.txt | 4 ++-- source/dnode/vnode/CMakeLists.txt | 2 +- 3 files changed, 31 insertions(+), 6 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index df9519d00f..db4d359938 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -124,6 +124,9 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) + file(MAKE_DIRECTORY 
${CMAKE_BINARY_DIR}/build/) + set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) + cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif(${BUILD_WITH_COS}) @@ -157,6 +160,21 @@ if(${BUILD_GEOS}) cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif() +# SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=unused-function") +# include(ExternalProject) +# ExternalProject_Add(mxml +# GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git +# GIT_TAG release-2.10 +# SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" +# #BINARY_DIR "" +# BUILD_IN_SOURCE TRUE +# CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build +# BUILD_COMMAND make +# INSTALL_COMMAND make install +# TEST_COMMAND "" +# ) + + # download dependencies configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . @@ -355,7 +373,10 @@ endif() # cos if(${BUILD_WITH_COS}) + if(NOT ${TD_WINDOWS}) + #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) + INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) set(CMAKE_BUILD_TYPE debug) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) @@ -363,11 +384,15 @@ if(${BUILD_WITH_COS}) add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL) target_include_directories( - cos_c_sdk - PUBLIC $ - ) + cos_c_sdk + PUBLIC $ + ) set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME}) + + else() + + endif(NOT ${TD_WINDOWS}) endif(${BUILD_WITH_COS}) # lucene diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 77c57e5a65..3eb484c2c5 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -39,11 +39,11 @@ target_include_directories( find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) -find_library(MINIXML_LIBRARY mxml) +#find_library(MINIXML_LIBRARY mxml) find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) target_link_libraries(cosTest ${APR_UTIL_LIBRARY}) target_link_libraries(cosTest ${APR_LIBRARY}) -target_link_libraries(cosTest ${MINIXML_LIBRARY}) +target_link_libraries(cosTest mxml) target_link_libraries(cosTest ${CURL_LIBRARY}) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 0612f924f5..eea81ea3d2 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -137,7 +137,7 @@ endif() find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) -find_library(MINIXML_LIBRARY mxml) +#find_library(MINIXML_LIBRARY mxml) find_library(CURL_LIBRARY curl) target_link_libraries( From c4f7b5d530a58026b1b1b7b64b535e1e12887ecc Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 10:23:00 +0800 Subject: [PATCH 09/81] mxml: makefile for mxml --- cmake/mxml_CMakeLists.txt.in | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 cmake/mxml_CMakeLists.txt.in diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in new file mode 100644 index 0000000000..994aa6e2cb --- /dev/null +++ b/cmake/mxml_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# cos +ExternalProject_Add(mxml + GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git + GIT_TAG release-2.10 + SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" + #BINARY_DIR "" + BUILD_IN_SOURCE TRUE + 
CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + BUILD_COMMAND make + INSTALL_COMMAND make install + TEST_COMMAND "" +) From 4d1155a5cfe6a329a4bb728e1c4aa3c3e3600a77 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 10:43:27 +0800 Subject: [PATCH 10/81] curl: makefile for curl --- cmake/curl_CMakeLists.txt.in | 12 ++++++++++++ contrib/test/cos/CMakeLists.txt | 4 ++-- source/dnode/vnode/CMakeLists.txt | 6 +++--- 3 files changed, 17 insertions(+), 5 deletions(-) create mode 100644 cmake/curl_CMakeLists.txt.in diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in new file mode 100644 index 0000000000..a23c5e7bab --- /dev/null +++ b/cmake/curl_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# curl +ExternalProject_Add(curl + GIT_REPOSITORY https://github.com/curl/curl.git + GIT_TAG curl-7_88_1 + SOURCE_DIR "${TD_CONTRIB_DIR}/curl" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 3eb484c2c5..38de8a25e8 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -40,10 +40,10 @@ target_include_directories( find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) -find_library(CURL_LIBRARY curl) +#find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) target_link_libraries(cosTest ${APR_UTIL_LIBRARY}) target_link_libraries(cosTest ${APR_LIBRARY}) target_link_libraries(cosTest mxml) -target_link_libraries(cosTest ${CURL_LIBRARY}) +target_link_libraries(cosTest curl) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index eea81ea3d2..562207268c 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -138,7 +138,7 @@ endif() find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) -find_library(CURL_LIBRARY curl) +#find_library(CURL_LIBRARY curl) target_link_libraries( vnode @@ -164,8 +164,8 @@ target_link_libraries( cos_c_sdk ${APR_UTIL_LIBRARY} ${APR_LIBRARY} - ${MINIXML_LIBRARY} - ${CURL_LIBRARY} + mxml + curl ) IF (TD_GRANT) From 7114ad63e5ac8846a11e221079031ee22bccc4f1 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 10:54:17 +0800 Subject: [PATCH 11/81] =?UTF-8?q?apr=EF=BC=9Amakefile=20for=20apr=20&=20ap?= =?UTF-8?q?r-util?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- cmake/apr-util_CMakeLists.txt.in | 12 ++++++++++++ cmake/apr_CMakeLists.txt.in | 12 ++++++++++++ contrib/test/cos/CMakeLists.txt | 8 ++++---- source/dnode/vnode/CMakeLists.txt | 8 ++++---- 4 files changed, 32 insertions(+), 8 deletions(-) create mode 100644 cmake/apr-util_CMakeLists.txt.in create mode 100644 cmake/apr_CMakeLists.txt.in diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in new file mode 100644 index 0000000000..8471a05db6 --- /dev/null +++ b/cmake/apr-util_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# apr-util +ExternalProject_Add(apr + GIT_REPOSITORY https://github.com/apache/apr-util.git + GIT_TAG 1.5.4 + SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/cmake/apr_CMakeLists.txt.in 
b/cmake/apr_CMakeLists.txt.in new file mode 100644 index 0000000000..68b6f39c89 --- /dev/null +++ b/cmake/apr_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# apr +ExternalProject_Add(apr + GIT_REPOSITORY https://github.com/apache/apr.git + GIT_TAG 1.5.2 + SOURCE_DIR "${TD_CONTRIB_DIR}/apr" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 38de8a25e8..2d2e101877 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -37,13 +37,13 @@ target_include_directories( PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" ) -find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) #find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) -target_link_libraries(cosTest ${APR_UTIL_LIBRARY}) -target_link_libraries(cosTest ${APR_LIBRARY}) +target_link_libraries(cosTest apr}) +target_link_libraries(cosTest apr-util}) target_link_libraries(cosTest mxml) target_link_libraries(cosTest curl) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 562207268c..cf7d205c00 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,8 +135,8 @@ else() endif() endif() -find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) #find_library(CURL_LIBRARY curl) @@ -162,8 +162,8 @@ target_link_libraries( # s3 cos_c_sdk - ${APR_UTIL_LIBRARY} - ${APR_LIBRARY} + apr + apr-util mxml curl ) From 2e0519b9609d98a119b8908199c073dd8cb07d9f Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 11:01:42 +0800 Subject: [PATCH 12/81] apr: fix apr & apr-util project names --- cmake/apr-util_CMakeLists.txt.in | 2 +- cmake/apr_CMakeLists.txt.in | 2 +- contrib/test/cos/CMakeLists.txt | 4 ++-- source/dnode/vnode/CMakeLists.txt | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 8471a05db6..c4dd943243 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -1,5 +1,5 @@ # apr-util -ExternalProject_Add(apr +ExternalProject_Add(aprutil-1 GIT_REPOSITORY https://github.com/apache/apr-util.git GIT_TAG 1.5.4 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 68b6f39c89..bfbe8196d3 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -1,5 +1,5 @@ # apr -ExternalProject_Add(apr +ExternalProject_Add(apr-1 GIT_REPOSITORY https://github.com/apache/apr.git GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 2d2e101877..f8804033de 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -43,7 +43,7 @@ target_include_directories( #find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) -target_link_libraries(cosTest apr}) -target_link_libraries(cosTest apr-util}) 
+target_link_libraries(cosTest apr-1}) +target_link_libraries(cosTest aprutil-1}) target_link_libraries(cosTest mxml) target_link_libraries(cosTest curl) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index cf7d205c00..a219990690 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -162,8 +162,8 @@ target_link_libraries( # s3 cos_c_sdk - apr - apr-util + apr-1 + aprutil-1 mxml curl ) From 57ba106371bbbcbf83b14ce8668b798aae861bbe Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 11:04:53 +0800 Subject: [PATCH 13/81] cos: move cmake prefix path after external building --- contrib/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index db4d359938..0ef799e9da 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -125,7 +125,6 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) - set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) @@ -374,6 +373,7 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) + set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) From 398567ef4ca524cd74ee3f203ba6512cde7f523d Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 11:14:00 +0800 Subject: [PATCH 14/81] contrib/cmake: add apr apr-util, and curl into makefile --- contrib/CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 0ef799e9da..053266c533 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -125,6 +125,9 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) From 7e2859ed43e8ebf375a5808b0d35c3c191a725b6 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 13:03:37 +0800 Subject: [PATCH 15/81] apr: use tarball to avoid ./buildconf --- cmake/apr-util_CMakeLists.txt.in | 18 +++++++++++------- cmake/apr_CMakeLists.txt.in | 21 ++++++++++++++------- cmake/curl_CMakeLists.txt.in | 9 ++++----- contrib/CMakeLists.txt | 2 +- source/dnode/vnode/CMakeLists.txt | 16 ++++++++-------- 5 files changed, 38 insertions(+), 28 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index c4dd943243..b81745aeef 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -1,12 +1,16 @@ # apr-util ExternalProject_Add(aprutil-1 - GIT_REPOSITORY https://github.com/apache/apr-util.git - GIT_TAG 1.5.4 + URL https://dlcdn.apache.org//apr/apr-util-1.6.3.tar.gz + URL_HASH SHA256=2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983 + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/apache/apr-util.git + #GIT_TAG 
1.5.4 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" - BINARY_DIR "" - #BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" + #BINARY_DIR "" + BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build + BUILD_COMMAND make + INSTALL_COMMAND make install TEST_COMMAND "" ) diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index bfbe8196d3..037c2ee6cc 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -1,12 +1,19 @@ # apr ExternalProject_Add(apr-1 - GIT_REPOSITORY https://github.com/apache/apr.git - GIT_TAG 1.5.2 + URL https://dlcdn.apache.org//apr/apr-1.7.4.tar.gz + URL_HASH SHA256=a4137dd82a185076fa50ba54232d920a17c6469c30b0876569e1c2a05ff311d9 + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/apache/apr.git + #GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" - BINARY_DIR "" - #BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" + #BINARY_DIR "${CMAKE_BINARY_DIR}/build" + BUILD_IN_SOURCE TRUE + #CONFIGURE_COMMAND "" + #BUILD_COMMAND "" + #INSTALL_COMMAND "" + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + BUILD_COMMAND make + INSTALL_COMMAND make install TEST_COMMAND "" ) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index a23c5e7bab..cec4dda004 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -3,10 +3,9 @@ ExternalProject_Add(curl GIT_REPOSITORY https://github.com/curl/curl.git GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" - BINARY_DIR "" - #BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" + BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + BUILD_COMMAND make + INSTALL_COMMAND make install TEST_COMMAND "" ) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 053266c533..058b2cf042 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -125,10 +125,10 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif(${BUILD_WITH_COS}) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index a219990690..0612f924f5 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,10 +135,10 @@ else() endif() endif() -#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) -#find_library(MINIXML_LIBRARY mxml) -#find_library(CURL_LIBRARY curl) +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) target_link_libraries( vnode @@ -162,10 +162,10 @@ target_link_libraries( # s3 cos_c_sdk - apr-1 - aprutil-1 - mxml - curl + ${APR_UTIL_LIBRARY} + ${APR_LIBRARY} + ${MINIXML_LIBRARY} + ${CURL_LIBRARY} ) IF (TD_GRANT) From 
56b348abf2e821fd58834566ff878ffc988bbf34 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 13:15:25 +0800 Subject: [PATCH 16/81] curl: use tarball --- cmake/curl_CMakeLists.txt.in | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index cec4dda004..cbfe939219 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -1,7 +1,10 @@ # curl ExternalProject_Add(curl - GIT_REPOSITORY https://github.com/curl/curl.git - GIT_TAG curl-7_88_1 + URL https://curl.se/download/curl-8.2.1.tar.gz + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/curl/curl.git + #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ From 376a2c2520dc3c5887bbb10f17a143ba9aeec407 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 13:31:18 +0800 Subject: [PATCH 17/81] curl: with openssl building --- cmake/curl_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index cbfe939219..e411cd893c 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -7,7 +7,7 @@ ExternalProject_Add(curl #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-openssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" From b08d5b4d42de9ae814c695478d7eee9c28616573 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 14:02:10 +0800 Subject: [PATCH 18/81] mxml: add include dir to vnode --- source/dnode/vnode/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 0612f924f5..6c107e0a22 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -195,6 +195,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" + PUBLIC "${CMAKE_BINARY_DIR}/build/include" ) if(${BUILD_TEST}) From 30cbbc425fdf2a17574c77bea6846ca15af6ba75 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 14:31:17 +0800 Subject: [PATCH 19/81] cos: new update command to build every cmake --- cmake/apr-util_CMakeLists.txt.in | 1 + cmake/apr_CMakeLists.txt.in | 5 +---- cmake/curl_CMakeLists.txt.in | 3 ++- cmake/mxml_CMakeLists.txt.in | 1 + 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index b81745aeef..ee30787cb6 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -9,6 +9,7 @@ ExternalProject_Add(aprutil-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" #BINARY_DIR "" BUILD_IN_SOURCE TRUE + UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 037c2ee6cc..fa124de62c 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -7,11 +7,8 @@ ExternalProject_Add(apr-1 #GIT_REPOSITORY https://github.com/apache/apr.git #GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" - #BINARY_DIR 
"${CMAKE_BINARY_DIR}/build" BUILD_IN_SOURCE TRUE - #CONFIGURE_COMMAND "" - #BUILD_COMMAND "" - #INSTALL_COMMAND "" + UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index e411cd893c..47c4fd72a1 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -7,7 +7,8 @@ ExternalProject_Add(curl #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-openssl + UPDATE_COMMAND "" + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 994aa6e2cb..12c9ea7d89 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -5,6 +5,7 @@ ExternalProject_Add(mxml SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" #BINARY_DIR "" BUILD_IN_SOURCE TRUE + UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ BUILD_COMMAND make INSTALL_COMMAND make install From 5d0edcd17b16bc557087a7f148d9f6b2d69d39d4 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 15:41:23 +0800 Subject: [PATCH 20/81] cos: use /usr/local as prefix instead of debug/build --- cmake/apr-util_CMakeLists.txt.in | 3 ++- cmake/apr_CMakeLists.txt.in | 3 ++- cmake/curl_CMakeLists.txt.in | 3 ++- cmake/mxml_CMakeLists.txt.in | 3 ++- contrib/CMakeLists.txt | 4 ++-- 5 files changed, 10 insertions(+), 6 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index ee30787cb6..fc4f92858c 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -10,7 +10,8 @@ ExternalProject_Add(aprutil-1 #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build + CONFIGURE_COMMAND ./configure --with-apr=/usr/local/ BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index fa124de62c..57e2014c31 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -9,7 +9,8 @@ ExternalProject_Add(apr-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 47c4fd72a1..fcd16a0518 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -8,7 +8,8 @@ ExternalProject_Add(curl SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl + CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 12c9ea7d89..cdd3e5b301 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ 
b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,8 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 058b2cf042..507928cbe9 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -124,7 +124,7 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) - file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + #file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -376,7 +376,7 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) + #set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) From 93ce558abf20e429980a6901344098bf16a0494c Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 15:49:34 +0800 Subject: [PATCH 21/81] apu: fix with-apr config --- cmake/apr-util_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index fc4f92858c..96a8b3ef75 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build - CONFIGURE_COMMAND ./configure --with-apr=/usr/local/ + CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" From bc64d5f769f6d5fdb7baf7b43e81ef2708fa04b8 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 16:53:19 +0800 Subject: [PATCH 22/81] cos: use ~/local as prefix for building --- cmake/apr-util_CMakeLists.txt.in | 4 ++-- cmake/apr_CMakeLists.txt.in | 4 ++-- cmake/curl_CMakeLists.txt.in | 4 ++-- cmake/mxml_CMakeLists.txt.in | 4 ++-- contrib/CMakeLists.txt | 4 ++-- tests/parallel_test/container_build.sh | 1 + 6 files changed, 11 insertions(+), 10 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 96a8b3ef75..1ae52c69af 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -10,8 +10,8 @@ ExternalProject_Add(aprutil-1 #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build - CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ --with-apr=$ENV{HOME}/local + #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 57e2014c31..1df68919ae 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -9,8 +9,8 @@ ExternalProject_Add(apr-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure 
--prefix=${CMAKE_BINARY_DIR}/build/ - CONFIGURE_COMMAND ./configure + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ + #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index fcd16a0518..b09e85b9b2 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -8,8 +8,8 @@ ExternalProject_Add(curl SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl - CONFIGURE_COMMAND ./configure --without-ssl + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local --without-ssl + #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index cdd3e5b301..33dc48ab4e 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,8 +6,8 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ - CONFIGURE_COMMAND ./configure + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local + #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 507928cbe9..cc93226d68 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -124,7 +124,7 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) - #file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + file(MAKE_DIRECTORY $ENV{HOME}/local/) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -376,7 +376,7 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - #set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) + set(CMAKE_PREFIX_PATH $ENV{HOME}/local) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 5ae061072a..699a4667dd 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -88,6 +88,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ + -v /root/local:/root/local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From 1ce3ef7fd52c3b86c2a2a68481cb10fa9c4b2602 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 17:02:16 +0800 Subject: [PATCH 23/81] cos: fix local/include directory --- contrib/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index cc93226d68..9feefa6947 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -379,7 +379,7 @@ if(${BUILD_WITH_COS}) set(CMAKE_PREFIX_PATH $ENV{HOME}/local) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) - 
INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) + INCLUDE_DIRECTORIES($ENV{HOME}/local/include) set(CMAKE_BUILD_TYPE debug) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) From 4911b6c8558beb4948fef3dbd1d6c46dfe630f81 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 17:07:55 +0800 Subject: [PATCH 24/81] container_build: use local as install dir --- tests/parallel_test/container_build.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 699a4667dd..8de8f377fd 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -60,6 +60,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ + -v /root/local:/root/local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From a5ccc3e8aa39f91a85931129dffc6aeea8e34767 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 17:53:25 +0800 Subject: [PATCH 25/81] apr: make apr, apu, curl build always --- cmake/apr-util_CMakeLists.txt.in | 3 ++- cmake/apr_CMakeLists.txt.in | 4 +++- cmake/curl_CMakeLists.txt.in | 3 ++- cmake/mxml_CMakeLists.txt.in | 2 +- contrib/CMakeLists.txt | 15 --------------- 5 files changed, 8 insertions(+), 19 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 1ae52c69af..c64e4ffcdb 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -9,7 +9,8 @@ ExternalProject_Add(aprutil-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" #BINARY_DIR "" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ --with-apr=$ENV{HOME}/local #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 1df68919ae..bae8cfe0a6 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -8,7 +8,9 @@ ExternalProject_Add(apr-1 #GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + UPDATE_DISCONNECTED TRUE + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ #CONFIGURE_COMMAND ./configure BUILD_COMMAND make diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index b09e85b9b2..27457ffdbc 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -7,7 +7,8 @@ ExternalProject_Add(curl #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local --without-ssl #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 33dc48ab4e..f9b7e8e642 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -5,7 +5,7 @@ ExternalProject_Add(mxml SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" #BINARY_DIR "" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local #CONFIGURE_COMMAND ./configure BUILD_COMMAND make diff --git 
a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 9feefa6947..3fb7b93abe 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -162,21 +162,6 @@ if(${BUILD_GEOS}) cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif() -# SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=unused-function") -# include(ExternalProject) -# ExternalProject_Add(mxml -# GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git -# GIT_TAG release-2.10 -# SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" -# #BINARY_DIR "" -# BUILD_IN_SOURCE TRUE -# CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build -# BUILD_COMMAND make -# INSTALL_COMMAND make install -# TEST_COMMAND "" -# ) - - # download dependencies configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . From a853d9d40cd9c937417d90692ad62c58e020e486 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 18:21:55 +0800 Subject: [PATCH 26/81] mxml: use ~/local/include --- source/dnode/vnode/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 6c107e0a22..e6af282d10 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -195,7 +195,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" - PUBLIC "${CMAKE_BINARY_DIR}/build/include" + PUBLIC "$ENV{HOME}/local/include" ) if(${BUILD_TEST}) From ca5571d0d6fb05b622e93d90bc3d5f30077ad1ec Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 18:48:59 +0800 Subject: [PATCH 27/81] config: fix default configs --- source/common/src/tglobal.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index fbc98715f0..91ab9f62d5 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -266,6 +266,9 @@ int32_t taosSetTfsCfg(SConfig *pCfg); int32_t taosSetS3Cfg(SConfig *pCfg) { tstrncpy(tsS3AccessKey, cfgGetItem(pCfg, "s3Accesskey")->str, TSDB_FQDN_LEN); + if (tsS3AccessKey[0] == '<') { + return 0; + } char *colon = strchr(tsS3AccessKey, ':'); if (!colon) { uError("invalid access key:%s", tsS3AccessKey); From cd63e814500cdd9138e674831e0bbef664fc2b75 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:18:12 +0800 Subject: [PATCH 28/81] cos: separate building phase for apr & apr-util --- contrib/CMakeLists.txt | 42 +++++++++++++++++++++++++------ source/dnode/vnode/CMakeLists.txt | 2 +- 2 files changed, 36 insertions(+), 8 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 3fb7b93abe..e8f6c98efe 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -6,6 +6,35 @@ function(cat IN_FILE OUT_FILE) file(APPEND ${OUT_FILE} "${CONTENTS}") endfunction(cat IN_FILE OUT_FILE) +set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") +configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + +if(${BUILD_WITH_COS}) + file(MAKE_DIRECTORY $ENV{HOME}/.cos-local/) + cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) +endif(${BUILD_WITH_COS}) + +configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") +execute_process(COMMAND 
"${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") +execute_process(COMMAND "${CMAKE_COMMAND}" --build . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") + +set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2") +configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) + +if(${BUILD_WITH_COS}) + cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) +endif(${BUILD_WITH_COS}) + +configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") +execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") +execute_process(COMMAND "${CMAKE_COMMAND}" --build . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") + set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -124,11 +153,10 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) - file(MAKE_DIRECTORY $ENV{HOME}/local/) - cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif(${BUILD_WITH_COS}) @@ -361,10 +389,10 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - set(CMAKE_PREFIX_PATH $ENV{HOME}/local) + set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) - INCLUDE_DIRECTORIES($ENV{HOME}/local/include) + INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local/include) set(CMAKE_BUILD_TYPE debug) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index e6af282d10..3cfcc9b716 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -195,7 +195,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" - PUBLIC "$ENV{HOME}/local/include" + PUBLIC "$ENV{HOME}/.cos-local/include" ) if(${BUILD_TEST}) From bb0b80e42df5c0c3b6af41ccdbd4767ccf3684de Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:24:51 +0800 Subject: [PATCH 29/81] cmake: use .cos-local as prebuilt directory --- cmake/apr-util_CMakeLists.txt.in | 2 +- cmake/apr_CMakeLists.txt.in | 2 +- cmake/curl_CMakeLists.txt.in | 2 +- cmake/mxml_CMakeLists.txt.in | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index c64e4ffcdb..6172be380e 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ --with-apr=$ENV{HOME}/local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ 
--with-apr=$ENV{HOME}/.cos-local #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index bae8cfe0a6..538b45a7f9 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(apr-1 UPDATE_DISCONNECTED TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 27457ffdbc..1d9d028848 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local --without-ssl + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --without-ssl #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index f9b7e8e642..87b126d8d3 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,7 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install From f3b56a0687e2b4f0127e3eff22787783f4a59154 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:28:23 +0800 Subject: [PATCH 30/81] apu: fix apr-util building --- contrib/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index e8f6c98efe..d20b205e69 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -26,7 +26,7 @@ set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) if(${BUILD_WITH_COS}) - cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) endif(${BUILD_WITH_COS}) configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") From b739a422e33da2c44842b9afb4674cd1ded0589d Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:31:11 +0800 Subject: [PATCH 31/81] container-build: use .cos-local for prebuilt building --- tests/parallel_test/container_build.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 8de8f377fd..62254984a9 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -60,7 +60,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/local:/root/local \ + -v /root/.cos-local:/root/.cos-local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ @@ -89,7 +89,7 @@ docker run \ -v 
/root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/local:/root/local \ + -v /root/.cos-local:/root/.cos-local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From c04ada3573e86ddf47a5e5fc67f79557c56b5c66 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 15:28:22 +0800 Subject: [PATCH 32/81] cos: link with static libs --- source/dnode/vnode/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 3cfcc9b716..c036fbc54a 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,6 +135,7 @@ else() endif() endif() +set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) find_library(MINIXML_LIBRARY mxml) From b2e615d4e70a78a8ef80bf4cf3e4ce7af6c9381e Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Thu, 10 Aug 2023 17:30:01 +0800 Subject: [PATCH 33/81] enhance: tag scan cursor based block --- include/libs/executor/storageapi.h | 14 +- source/dnode/vnode/src/inc/vnodeInt.h | 2 +- source/dnode/vnode/src/meta/metaQuery.c | 12 +- source/dnode/vnode/src/vnd/vnodeInitApi.c | 4 + source/libs/executor/inc/executil.h | 2 + source/libs/executor/inc/executorInt.h | 4 + source/libs/executor/inc/operator.h | 2 +- source/libs/executor/src/executil.c | 4 +- source/libs/executor/src/operator.c | 2 +- source/libs/executor/src/scanoperator.c | 271 +++++++++++++++++++++- 10 files changed, 298 insertions(+), 19 deletions(-) diff --git a/include/libs/executor/storageapi.h b/include/libs/executor/storageapi.h index 773f373a2d..724d6638db 100644 --- a/include/libs/executor/storageapi.h +++ b/include/libs/executor/storageapi.h @@ -98,6 +98,16 @@ typedef struct SMTbCursor { int8_t paused; } SMTbCursor; +typedef struct SMCtbCursor { + SMeta *pMeta; + void *pCur; + tb_uid_t suid; + void *pKey; + void *pVal; + int kLen; + int vLen; +} SMCtbCursor; + typedef struct SRowBuffPos { void* pRowBuff; void* pKey; @@ -278,13 +288,15 @@ typedef struct SStoreMeta { void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables, int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) & // metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta); - int64_t (*getNumOfRowsInMem)(void* pVnode); /** int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list); int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg); int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list); */ + SMCtbCursor* (*openCtbCursor)(void *pVnode, tb_uid_t uid, int lock); + void (*closeCtbCursor)(SMCtbCursor *pCtbCur, int lock); + tb_uid_t (*ctbCursorNext)(SMCtbCursor* pCur); } SStoreMeta; typedef struct SStoreMetaReader { diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index cd7704940b..e3b2d3e41e 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -167,7 +167,7 @@ int metaAddIndexToSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq); int metaDropIndexFromSTable(SMeta* 
pMeta, int64_t version, SDropIndexReq* pReq); int64_t metaGetTimeSeriesNum(SMeta* pMeta); -SMCtbCursor* metaOpenCtbCursor(SMeta* pMeta, tb_uid_t uid, int lock); +SMCtbCursor* metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock); void metaCloseCtbCursor(SMCtbCursor* pCtbCur, int lock); tb_uid_t metaCtbCursorNext(SMCtbCursor* pCtbCur); SMStbCursor* metaOpenStbCursor(SMeta* pMeta, tb_uid_t uid); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index c26bb45c2b..31c7bc8500 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -408,17 +408,9 @@ _err: return NULL; } -struct SMCtbCursor { - SMeta *pMeta; - TBC *pCur; - tb_uid_t suid; - void *pKey; - void *pVal; - int kLen; - int vLen; -}; -SMCtbCursor *metaOpenCtbCursor(SMeta *pMeta, tb_uid_t uid, int lock) { +SMCtbCursor *metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock) { + SMeta* pMeta = ((SVnode*)pVnode)->pMeta; SMCtbCursor *pCtbCur = NULL; SCtbIdxKey ctbIdxKey; int ret = 0; diff --git a/source/dnode/vnode/src/vnd/vnodeInitApi.c b/source/dnode/vnode/src/vnd/vnodeInitApi.c index 5c8d563d73..dca8dd271c 100644 --- a/source/dnode/vnode/src/vnd/vnodeInitApi.c +++ b/source/dnode/vnode/src/vnd/vnodeInitApi.c @@ -96,6 +96,10 @@ void initMetadataAPI(SStoreMeta* pMeta) { pMeta->metaGetCachedTbGroup = metaGetCachedTbGroup; pMeta->metaPutTbGroupToCache = metaPutTbGroupToCache; + + pMeta->openCtbCursor = metaOpenCtbCursor; + pMeta->closeCtbCursor = metaCloseCtbCursor; + pMeta->ctbCursorNext = metaCtbCursorNext; } void initTqAPI(SStoreTqReader* pTq) { diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h index 33c9d845b9..f273f63770 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -190,4 +190,6 @@ void printDataBlock(SSDataBlock* pBlock, const char* flag); void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order); void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery); +SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, + SStorageAPI* pStorageAPI); #endif // TDENGINE_EXECUTIL_H diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index fbca5e29f9..cadf367481 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -259,6 +259,10 @@ typedef struct STagScanInfo { SLimitNode* pSlimit; SReadHandle readHandle; STableListInfo* pTableListInfo; + uint64_t suid; + void* pCtbCursor; + SNode* pTagCond; + SNode* pTagIndexCond; } STagScanInfo; typedef enum EStreamScanMode { diff --git a/source/libs/executor/inc/operator.h b/source/libs/executor/inc/operator.h index e6c3405d7f..38cefc1cc5 100644 --- a/source/libs/executor/inc/operator.h +++ b/source/libs/executor/inc/operator.h @@ -81,7 +81,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo); +SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SNode* pTagCond, SNode*pTagIndexCond, SExecTaskInfo* pTaskInfo); 
SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode, const char* pUser, SExecTaskInfo* pTaskInfo); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index aa0c7945b0..5bb8f8a38b 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -47,8 +47,6 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* p static int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo, uint8_t* digest, const char* idstr, SStorageAPI* pStorageAPI); -static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, - SStorageAPI* pStorageAPI); static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; } static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->offset; } @@ -846,7 +844,7 @@ static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, S return -1; } -static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, +SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, SStorageAPI* pStorageAPI) { SSDataBlock* pResBlock = createDataBlock(); if (pResBlock == NULL) { diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 8ddcc8fd15..0fc1b77b73 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -380,7 +380,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR return NULL; } - pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTaskInfo); + pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN == type) { SBlockDistScanPhysiNode* pBlockNode = (SBlockDistScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 71b0747be8..24ed717c8a 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2688,6 +2688,271 @@ static void doTagScanOneTable(SOperatorInfo* pOperator, const SSDataBlock* pRes, } } +static void tagScanFreeUidTag(void* p) { + STUidTagInfo* pInfo = p; + if (pInfo->pTagVal != NULL) { + taosMemoryFree(pInfo->pTagVal); + } +} + +static int32_t tagScanCreateResultData(SDataType* pType, int32_t numOfRows, SScalarParam* pParam) { + SColumnInfoData* pColumnData = taosMemoryCalloc(1, sizeof(SColumnInfoData)); + if (pColumnData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return terrno; + } + + pColumnData->info.type = pType->type; + pColumnData->info.bytes = pType->bytes; + pColumnData->info.scale = pType->scale; + pColumnData->info.precision = pType->precision; + + int32_t code = colInfoDataEnsureCapacity(pColumnData, numOfRows, true); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + taosMemoryFree(pColumnData); + return terrno; + } + + pParam->columnData = pColumnData; + pParam->colAlloced = true; + return TSDB_CODE_SUCCESS; +} + +typedef struct STagScanFilterContext { + SHashObj* colHash; + int32_t index; + SArray* cInfoList; +} STagScanFilterContext; + +static EDealRes 
tagScanRewriteTagColumn(SNode** pNode, void* pContext) { + SColumnNode* pSColumnNode = NULL; + if (QUERY_NODE_COLUMN == nodeType((*pNode))) { + pSColumnNode = *(SColumnNode**)pNode; + } else if (QUERY_NODE_FUNCTION == nodeType((*pNode))) { + SFunctionNode* pFuncNode = *(SFunctionNode**)(pNode); + if (pFuncNode->funcType == FUNCTION_TYPE_TBNAME) { + pSColumnNode = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == pSColumnNode) { + return DEAL_RES_ERROR; + } + pSColumnNode->colId = -1; + pSColumnNode->colType = COLUMN_TYPE_TBNAME; + pSColumnNode->node.resType.type = TSDB_DATA_TYPE_VARCHAR; + pSColumnNode->node.resType.bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE; + nodesDestroyNode(*pNode); + *pNode = (SNode*)pSColumnNode; + } else { + return DEAL_RES_CONTINUE; + } + } else { + return DEAL_RES_CONTINUE; + } + + STagScanFilterContext* pCtx = (STagScanFilterContext*)pContext; + void* data = taosHashGet(pCtx->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId)); + if (!data) { + taosHashPut(pCtx->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId), pNode, sizeof((*pNode))); + pSColumnNode->slotId = pCtx->index++; + SColumnInfo cInfo = {.colId = pSColumnNode->colId, + .type = pSColumnNode->node.resType.type, + .bytes = pSColumnNode->node.resType.bytes}; + taosArrayPush(pCtx->cInfoList, &cInfo); + } else { + SColumnNode* col = *(SColumnNode**)data; + pSColumnNode->slotId = col->slotId; + } + + return DEAL_RES_CONTINUE; +} + + +static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aUidTagIdxs, void* pVnode, SStorageAPI* pAPI) { + int32_t code = 0; + int32_t numOfTables = taosArrayGetSize(aUidTags); + STagScanFilterContext ctx = {0}; + ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); + ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); + + nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&ctx); + + SSDataBlock* pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, aUidTags, pVnode, pAPI); + if (pResBlock == NULL) { + + } + + SArray* pBlockList = taosArrayInit(1, POINTER_BYTES); + taosArrayPush(pBlockList, &pResBlock); + SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; + + SScalarParam output = {0}; + code = tagScanCreateResultData(&type, numOfTables, &output); + if (code != TSDB_CODE_SUCCESS) { + + } + + code = scalarCalculate(pTagCond, pBlockList, &output); + if (code != TSDB_CODE_SUCCESS) { + } + + bool* result = (bool*)output.columnData->pData; + for (int32_t i = 0 ; i < numOfTables; ++i) { + if (result[i]) { + taosArrayPush(aUidTagIdxs, &i); + } + } + + taosHashCleanup(ctx.colHash); + taosArrayDestroy(ctx.cInfoList); + blockDataDestroy(pResBlock); + taosArrayDestroy(pBlockList); + colDataDestroy(output.columnData); + taosMemoryFreeClear(output.columnData); +} + +static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo* pExprInfo, SColumnInfoData* pColInfo, int rowIndex, const SStorageAPI* pAPI, void* pVnode) { + if (fmIsScanPseudoColumnFunc(pExprInfo->pExpr->_function.functionId)) { // tbname + char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(str, "zsl"); + // if (pUidTagInfo->name != NULL) { + // STR_TO_VARSTR(str, pUidTagInfo->name); + // } else { // name is not retrieved during filter + // pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); + // } + + colDataSetVal(pColInfo, rowIndex, str, false); + } else { + STagVal tagVal = {0}; + tagVal.cid = 
pExprInfo->base.pParam[0].pCol->colId; + if (pUidTagInfo->pTagVal == NULL) { + colDataSetNULL(pColInfo, rowIndex); + } else { + const char* p = pAPI->metaFn.extractTagVal(pUidTagInfo->pTagVal, pColInfo->info.type, &tagVal); + + if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { + colDataSetNULL(pColInfo, rowIndex); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { + colDataSetVal(pColInfo, rowIndex, p, false); + } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { + char* tmp = taosMemoryMalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1); + varDataSetLen(tmp, tagVal.nData); + memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); + colDataSetVal(pColInfo, rowIndex, tmp, false); + taosMemoryFree(tmp); + } else { + colDataSetVal(pColInfo, rowIndex, (const char*)&tagVal.i64, false); + } + } + } +} + +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aUidTagIdxs, + SStorageAPI* pAPI) { + STagScanInfo* pInfo = pOperator->info; + SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; + + for (int i = 0; i < taosArrayGetSize(aUidTagIdxs); ++i) { + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, *(int32_t*)taosArrayGet(aUidTagIdxs, i)); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); + } + } + return 0; +} + +#if 0 +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, + SStorageAPI* pAPI) { + STagScanInfo* pInfo = pOperator->info; + SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; + + int32_t nTbls = taosArrayGetSize(aUidTags); + for (int i = 0; i < nTbls; ++i) { + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, i); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + + // refactor later + if (fmIsScanPseudoColumnFunc(pExprInfo[j].pExpr->_function.functionId)) { + char str[512]; + + STR_TO_VARSTR(str, "zsl"); + colDataSetVal(pDst, (i), str, false); + } else { // it is a tag value + STagVal val = {0}; + val.cid = pExprInfo[j].base.pParam[0].pCol->colId; + const char* p = pAPI->metaFn.extractTagVal(pUidTagInfo->pTagVal, pDst->info.type, &val); + + char* data = NULL; + if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) { + data = tTagValToData((const STagVal*)p, false); + } else { + data = (char*)p; + } + colDataSetVal(pDst, i, data, + (data == NULL) || (pDst->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data))); + + if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL && IS_VAR_DATA_TYPE(((const STagVal*)p)->type) && + data != NULL) { + taosMemoryFree(data); + } + } + } + } + return 0; +} +#endif + + +static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { + if (pOperator->status == OP_EXEC_DONE) { + return NULL; + } + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SStorageAPI* pAPI = &pTaskInfo->storageAPI; + + STagScanInfo* pInfo = pOperator->info; + SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; + SSDataBlock* pRes = pInfo->pRes; + blockDataCleanup(pRes); + int32_t count = 0; + + if (pInfo->pCtbCursor == NULL) { + pInfo->pCtbCursor = pAPI->metaFn.openCtbCursor(pInfo->readHandle.vnode, pInfo->suid, 1); + } + SArray* aUidTags = taosArrayInit(pOperator->resultInfo.capacity, 
sizeof(STUidTagInfo)); + SArray* aUidTagIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + while (1) { + while (count < pOperator->resultInfo.capacity) { + SMCtbCursor* pCur = pInfo->pCtbCursor; + tb_uid_t uid = pAPI->metaFn.ctbCursorNext(pInfo->pCtbCursor); + if (uid == 0) { + break; + } + STUidTagInfo info = {.uid = uid, .pTagVal = pCur->pVal}; + info.pTagVal = taosMemoryMalloc(pCur->vLen); + memcpy(info.pTagVal, pCur->pVal, pCur->vLen); + taosArrayPush(aUidTags, &info); + } + + int32_t numTables = taosArrayGetSize(aUidTags); + if (numTables != 0 && pInfo->pTagCond != NULL) { + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aUidTagIdxs, pAPI); + } + tagScanFillResultBlock(pOperator, pRes, aUidTags, aUidTagIdxs, pAPI); + if (taosArrayGetSize(aUidTagIdxs) != 0) { + break; + } + taosArrayClearEx(aUidTags, tagScanFreeUidTag); + taosArrayClear(aUidTagIdxs); + } + taosArrayDestroy(aUidTagIdxs); + taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); + pOperator->resultInfo.totalRows += count; + return (pRes->info.rows == 0) ? NULL : pInfo->pRes; +} + static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -2753,7 +3018,7 @@ static void destroyTagScanOperatorInfo(void* param) { } SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, - STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) { + STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, SExecTaskInfo* pTaskInfo) { STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -2774,7 +3039,8 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi if (code != TSDB_CODE_SUCCESS) { goto _error; } - + pInfo->pTagCond = pTagCond; + pInfo->pTagIndexCond = pTagIndexCond; pInfo->pTableListInfo = pTableListInfo; pInfo->pRes = createDataBlockFromDescNode(pDescNode); pInfo->readHandle = *pReadHandle; @@ -2789,6 +3055,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doTagScan, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); + pInfo->suid = pPhyNode->suid; return pOperator; _error: From 20f5e2af5b5f6742466e82611f2e54278af6d776 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Thu, 10 Aug 2023 17:40:54 +0800 Subject: [PATCH 34/81] continue coding and save work --- source/libs/executor/src/scanoperator.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 24ed717c8a..42c488edbe 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2913,7 +2913,6 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { SStorageAPI* pAPI = &pTaskInfo->storageAPI; STagScanInfo* pInfo = pOperator->info; - SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; SSDataBlock* pRes = pInfo->pRes; blockDataCleanup(pRes); int32_t count = 0; @@ -2941,6 +2940,8 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aUidTagIdxs, pAPI); } tagScanFillResultBlock(pOperator, pRes, aUidTags, aUidTagIdxs, pAPI); + count = taosArrayGetSize(aUidTagIdxs); + if (taosArrayGetSize(aUidTagIdxs) != 0) { break; } From 
7085d6bc11d7c8fb7ef18258273bccdee5817337 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 18:49:17 +0800 Subject: [PATCH 35/81] mxml: disable shared lib --- cmake/mxml_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 87b126d8d3..7377f81c33 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,7 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --enable-shared=no #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install From 4513acfee925eaebf27b1486561afe3afab0ffe4 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 09:54:54 +0800 Subject: [PATCH 36/81] cos: use static libs for mxml, apr, apu, curl --- cmake/apr-util_CMakeLists.txt.in | 2 +- cmake/apr_CMakeLists.txt.in | 2 +- cmake/curl_CMakeLists.txt.in | 2 +- cmake/mxml_CMakeLists.txt.in | 2 +- contrib/CMakeLists.txt | 7 ++++--- source/dnode/vnode/CMakeLists.txt | 2 +- tests/parallel_test/container_build.sh | 4 ++-- 7 files changed, 11 insertions(+), 10 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 6172be380e..d98a381005 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ --with-apr=$ENV{HOME}/.cos-local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 538b45a7f9..18c4eb62a1 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(apr-1 UPDATE_DISCONNECTED TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --enable-shared=no #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 1d9d028848..5f1efc1e5a 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --without-ssl + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 7377f81c33..9dcb5df665 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,7 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --enable-shared=no + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --enable-shared=no #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index d20b205e69..452192a288 100644 --- 
a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -10,7 +10,7 @@ set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) if(${BUILD_WITH_COS}) - file(MAKE_DIRECTORY $ENV{HOME}/.cos-local/) + file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) @@ -389,10 +389,11 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local) + set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) - INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local/include) + INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include) + MESSAGE("$ENV{HOME}/.cos-local.1/include") set(CMAKE_BUILD_TYPE debug) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index c036fbc54a..684134c2d6 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -196,7 +196,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" - PUBLIC "$ENV{HOME}/.cos-local/include" + PUBLIC "$ENV{HOME}/.cos-local.1/include" ) if(${BUILD_TEST}) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 62254984a9..f5e426057e 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -60,7 +60,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/.cos-local:/root/.cos-local \ + -v /root/.cos-local.1:/root/.cos-local.1 \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ @@ -89,7 +89,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/.cos-local:/root/.cos-local \ + -v /root/.cos-local.1:/root/.cos-local.1 \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From 104ead6783d3f5225a8d53c8df07de46c2ac2f9b Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 10:06:42 +0800 Subject: [PATCH 37/81] apu: fix apr location --- cmake/apr-util_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index d98a381005..5a68020dd7 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ 
--with-apr=$ENV{HOME}/.cos-local.1 #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install From 7c39bc989083ce501dd6df5bd980b4edaab07057 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Fri, 11 Aug 2023 13:50:41 +0800 Subject: [PATCH 38/81] fix: some minor modifications --- source/libs/executor/inc/executorInt.h | 1 + source/libs/executor/src/scanoperator.c | 77 ++++++++++++++----------- 2 files changed, 44 insertions(+), 34 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index cadf367481..2b25feabb3 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -263,6 +263,7 @@ typedef struct STagScanInfo { void* pCtbCursor; SNode* pTagCond; SNode* pTagIndexCond; + SStorageAPI* pStorageAPI; } STagScanInfo; typedef enum EStreamScanMode { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 42c488edbe..5e0eb71c13 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2767,9 +2767,10 @@ static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) { } -static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aUidTagIdxs, void* pVnode, SStorageAPI* pAPI) { +static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode, SStorageAPI* pAPI) { int32_t code = 0; int32_t numOfTables = taosArrayGetSize(aUidTags); + STagScanFilterContext ctx = {0}; ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); @@ -2777,48 +2778,42 @@ static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aU nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&ctx); SSDataBlock* pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, aUidTags, pVnode, pAPI); - if (pResBlock == NULL) { - - } SArray* pBlockList = taosArrayInit(1, POINTER_BYTES); taosArrayPush(pBlockList, &pResBlock); SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; SScalarParam output = {0}; - code = tagScanCreateResultData(&type, numOfTables, &output); - if (code != TSDB_CODE_SUCCESS) { + tagScanCreateResultData(&type, numOfTables, &output); - } - - code = scalarCalculate(pTagCond, pBlockList, &output); - if (code != TSDB_CODE_SUCCESS) { - } + scalarCalculate(pTagCond, pBlockList, &output); bool* result = (bool*)output.columnData->pData; for (int32_t i = 0 ; i < numOfTables; ++i) { if (result[i]) { - taosArrayPush(aUidTagIdxs, &i); + taosArrayPush(aFilterIdxs, &i); } } - taosHashCleanup(ctx.colHash); - taosArrayDestroy(ctx.cInfoList); - blockDataDestroy(pResBlock); - taosArrayDestroy(pBlockList); colDataDestroy(output.columnData); taosMemoryFreeClear(output.columnData); + + blockDataDestroy(pResBlock); + taosArrayDestroy(pBlockList); + + taosHashCleanup(ctx.colHash); + taosArrayDestroy(ctx.cInfoList); } static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo* pExprInfo, SColumnInfoData* pColInfo, int rowIndex, const SStorageAPI* pAPI, void* pVnode) { if (fmIsScanPseudoColumnFunc(pExprInfo->pExpr->_function.functionId)) { // tbname char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; +// if (pUidTagInfo->name != NULL) { +// STR_TO_VARSTR(str, pUidTagInfo->name); +// } else { // name is not retrieved during filter +// 
pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); +// } STR_TO_VARSTR(str, "zsl"); - // if (pUidTagInfo->name != NULL) { - // STR_TO_VARSTR(str, pUidTagInfo->name); - // } else { // name is not retrieved during filter - // pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); - // } colDataSetVal(pColInfo, rowIndex, str, false); } else { @@ -2846,13 +2841,15 @@ static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo } } -static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aUidTagIdxs, +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aFilterIdxs, SStorageAPI* pAPI) { STagScanInfo* pInfo = pOperator->info; SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; - for (int i = 0; i < taosArrayGetSize(aUidTagIdxs); ++i) { - STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, *(int32_t*)taosArrayGet(aUidTagIdxs, i)); + size_t szTables = taosArrayGetSize(aFilterIdxs); + for (int i = 0; i < szTables; ++i) { + int32_t idx = *(int32_t*)taosArrayGet(aFilterIdxs, i); + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, idx); for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); @@ -2920,8 +2917,10 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { if (pInfo->pCtbCursor == NULL) { pInfo->pCtbCursor = pAPI->metaFn.openCtbCursor(pInfo->readHandle.vnode, pInfo->suid, 1); } + SArray* aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); - SArray* aUidTagIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + SArray* aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + while (1) { while (count < pOperator->resultInfo.capacity) { SMCtbCursor* pCur = pInfo->pCtbCursor; @@ -2936,20 +2935,26 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { } int32_t numTables = taosArrayGetSize(aUidTags); - if (numTables != 0 && pInfo->pTagCond != NULL) { - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aUidTagIdxs, pAPI); - } - tagScanFillResultBlock(pOperator, pRes, aUidTags, aUidTagIdxs, pAPI); - count = taosArrayGetSize(aUidTagIdxs); - - if (taosArrayGetSize(aUidTagIdxs) != 0) { + if (numTables == 0) { break; } + + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + + tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, pAPI); + count = taosArrayGetSize(aFilterIdxs); + + if (count != 0) { + break; + } + taosArrayClearEx(aUidTags, tagScanFreeUidTag); - taosArrayClear(aUidTagIdxs); + taosArrayClear(aFilterIdxs); } - taosArrayDestroy(aUidTagIdxs); + + taosArrayDestroy(aFilterIdxs); taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); + pOperator->resultInfo.totalRows += count; return (pRes->info.rows == 0) ? 
NULL : pInfo->pRes; } @@ -3012,6 +3017,9 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { static void destroyTagScanOperatorInfo(void* param) { STagScanInfo* pInfo = (STagScanInfo*)param; + if (pInfo->pCtbCursor != NULL) { + pInfo->pStorageAPI->metaFn.closeCtbCursor(pInfo->pCtbCursor, 1); + } pInfo->pRes = blockDataDestroy(pInfo->pRes); taosArrayDestroy(pInfo->matchInfo.pList); pInfo->pTableListInfo = tableListDestroy(pInfo->pTableListInfo); @@ -3043,6 +3051,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi pInfo->pTagCond = pTagCond; pInfo->pTagIndexCond = pTagIndexCond; pInfo->pTableListInfo = pTableListInfo; + pInfo->pStorageAPI = &pTaskInfo->storageAPI; pInfo->pRes = createDataBlockFromDescNode(pDescNode); pInfo->readHandle = *pReadHandle; pInfo->curPos = 0; From 1c7f854a719b590f4aa5d201d2543681d1e28975 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Fri, 11 Aug 2023 14:47:28 +0800 Subject: [PATCH 39/81] enhance: add only meta ctb index to tag scan physi node --- include/libs/nodes/plannodes.h | 14 +++- source/libs/nodes/src/nodesCloneFuncs.c | 11 ++- source/libs/nodes/src/nodesCodeFuncs.c | 70 ++++++++++++++++++- source/libs/nodes/src/nodesMsgFuncs.c | 89 +++++++++++++++++++++++++ 4 files changed, 180 insertions(+), 4 deletions(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 063318332a..0830dc4918 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -334,7 +334,19 @@ typedef struct SScanPhysiNode { bool groupOrderScan; } SScanPhysiNode; -typedef SScanPhysiNode STagScanPhysiNode; +typedef struct STagScanPhysiNode { + // SScanPhysiNode scan; //TODO? + SPhysiNode node; + SNodeList* pScanCols; + SNodeList* pScanPseudoCols; + uint64_t uid; // unique id of the table + uint64_t suid; + int8_t tableType; + SName tableName; + bool groupOrderScan; + bool onlyMetaCtbIdx; //no tbname, tag index not used. 
+} STagScanPhysiNode; + typedef SScanPhysiNode SBlockDistScanPhysiNode; typedef struct SLastRowScanPhysiNode { diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index f5eacf0bd5..965af41fa7 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -564,7 +564,16 @@ static int32_t physiScanCopy(const SScanPhysiNode* pSrc, SScanPhysiNode* pDst) { } static int32_t physiTagScanCopy(const STagScanPhysiNode* pSrc, STagScanPhysiNode* pDst) { - return physiScanCopy(pSrc, pDst); + COPY_BASE_OBJECT_FIELD(node, physiNodeCopy); + CLONE_NODE_LIST_FIELD(pScanCols); + CLONE_NODE_LIST_FIELD(pScanPseudoCols); + COPY_SCALAR_FIELD(uid); + COPY_SCALAR_FIELD(suid); + COPY_SCALAR_FIELD(tableType); + COPY_OBJECT_FIELD(tableName, sizeof(SName)); + COPY_SCALAR_FIELD(groupOrderScan); + COPY_SCALAR_FIELD(onlyMetaCtbIdx); + return TSDB_CODE_SUCCESS; } static int32_t physiTableScanCopy(const STableScanPhysiNode* pSrc, STableScanPhysiNode* pDst) { diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index f25616065e..3540f8cb70 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1562,7 +1562,7 @@ static const char* jkScanPhysiPlanTableName = "TableName"; static const char* jkScanPhysiPlanGroupOrderScan = "GroupOrderScan"; static int32_t physiScanNodeToJson(const void* pObj, SJson* pJson) { - const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + const SScanPhysiNode* pNode = (const SScanPhysiNode*)pObj; int32_t code = physicPlanNodeToJson(pObj, pJson); if (TSDB_CODE_SUCCESS == code) { @@ -1591,7 +1591,7 @@ static int32_t physiScanNodeToJson(const void* pObj, SJson* pJson) { } static int32_t jsonToPhysiScanNode(const SJson* pJson, void* pObj) { - STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + SScanPhysiNode* pNode = (SScanPhysiNode*)pObj; int32_t code = jsonToPhysicPlanNode(pJson, pObj); if (TSDB_CODE_SUCCESS == code) { @@ -1619,6 +1619,70 @@ static int32_t jsonToPhysiScanNode(const SJson* pJson, void* pObj) { return code; } +static const char* jkTagScanPhysiOnlyMetaCtbIdx = "OnlyMetaCtbIdx"; + +static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) { + const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + + int32_t code = physicPlanNodeToJson(pObj, pJson); + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkScanPhysiPlanScanCols, pNode->pScanCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkScanPhysiPlanScanPseudoCols, pNode->pScanPseudoCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableId, pNode->uid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanSTableId, pNode->suid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableType, pNode->tableType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkScanPhysiPlanTableName, nameToJson, &pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkScanPhysiPlanGroupOrderScan, pNode->groupOrderScan); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkTagScanPhysiOnlyMetaCtbIdx, pNode->onlyMetaCtbIdx); + } + return code; +} + +static int32_t jsonToPhysiTagScanNode(const SJson* pJson, void* pObj) { + STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + + int32_t code = 
jsonToPhysicPlanNode(pJson, pObj); + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkScanPhysiPlanScanCols, &pNode->pScanCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkScanPhysiPlanScanPseudoCols, &pNode->pScanPseudoCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanTableId, &pNode->uid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanSTableId, &pNode->suid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkScanPhysiPlanTableType, &pNode->tableType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonToObject(pJson, jkScanPhysiPlanTableName, jsonToName, &pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkScanPhysiPlanGroupOrderScan, &pNode->groupOrderScan); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkTagScanPhysiOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); + } + return code; +} + static const char* jkLastRowScanPhysiPlanGroupTags = "GroupTags"; static const char* jkLastRowScanPhysiPlanGroupSort = "GroupSort"; @@ -6590,6 +6654,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { case QUERY_NODE_LOGIC_PLAN: return logicPlanToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + return physiTableScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: return physiScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: @@ -6908,6 +6973,7 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { case QUERY_NODE_LOGIC_PLAN: return jsonToLogicPlan(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + return jsonToPhysiTagScanNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: case QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN: return jsonToPhysiScanNode(pJson, pObj); diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 20e829766d..4d1120861d 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -2003,6 +2003,91 @@ static int32_t msgToPhysiScanNode(STlvDecoder* pDecoder, void* pObj) { return code; } +enum { + PHY_TAG_SCAN_CODE_BASE_NODE = 1, + PHY_TAG_SCAN_CODE_SCAN_COLS, + PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, + PHY_TAG_SCAN_CODE_BASE_UID, + PHY_TAG_SCAN_CODE_BASE_SUID, + PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, + PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, + PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, + PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX +}; + +static int32_t physiTagScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { + const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + + int32_t code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_BASE_NODE, physiNodeToMsg, &pNode->node); + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_COLS, nodeListToMsg, pNode->pScanCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, nodeListToMsg, pNode->pScanPseudoCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_UID, pNode->uid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_SUID, pNode->suid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeI8(pEncoder, PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, pNode->tableType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, 
PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, nameToMsg, &pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, pNode->groupOrderScan); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX, pNode->onlyMetaCtbIdx); + } + return code; +} + +static int32_t msgToPhysiTagScanNode(STlvDecoder* pDecoder, void* pObj) { + STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + + int32_t code = TSDB_CODE_SUCCESS; + STlv* pTlv = NULL; + tlvForEach(pDecoder, pTlv, code) { + switch (pTlv->type) { + case PHY_TAG_SCAN_CODE_BASE_NODE: + code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node); + break; + case PHY_TAG_SCAN_CODE_SCAN_COLS: + code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanCols); + break; + case PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS: + code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanPseudoCols); + break; + case PHY_TAG_SCAN_CODE_BASE_UID: + code = tlvDecodeU64(pTlv, &pNode->uid); + break; + case PHY_TAG_SCAN_CODE_BASE_SUID: + code = tlvDecodeU64(pTlv, &pNode->suid); + break; + case PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE: + code = tlvDecodeI8(pTlv, &pNode->tableType); + break; + case PHY_TAG_SCAN_CODE_BASE_TABLE_NAME: + code = tlvDecodeObjFromTlv(pTlv, msgToName, &pNode->tableName); + break; + case PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN: + code = tlvDecodeBool(pTlv, &pNode->groupOrderScan); + break; + case PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX: + code = tlvDecodeBool(pTlv, &pNode->onlyMetaCtbIdx); + break; + default: + break; + } + } + + return code; +} + enum { PHY_LAST_ROW_SCAN_CODE_SCAN = 1, PHY_LAST_ROW_SCAN_CODE_GROUP_TAGS, @@ -3726,6 +3811,8 @@ static int32_t specificNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { code = caseWhenNodeToMsg(pObj, pEncoder); break; case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + code = physiTagScanNodeToMsg(pObj, pEncoder); + break; case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: code = physiScanNodeToMsg(pObj, pEncoder); break; @@ -3869,6 +3956,8 @@ static int32_t msgToSpecificNode(STlvDecoder* pDecoder, void* pObj) { code = msgToCaseWhenNode(pDecoder, pObj); break; case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + code = msgToPhysiTagScanNode(pDecoder, pObj); + break; case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: code = msgToPhysiScanNode(pDecoder, pObj); break; From a0c62d215d36bc980aabe3ebb12ee32521db2c43 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Fri, 11 Aug 2023 14:54:43 +0800 Subject: [PATCH 40/81] enhance: tag scan only meta ctb idx backend modification --- source/libs/executor/src/operator.c | 19 ++++++++++--------- source/libs/executor/src/scanoperator.c | 9 ++++++--- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 0fc1b77b73..d0805a86e4 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -370,17 +370,18 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR STableCountScanPhysiNode* pTblCountScanNode = (STableCountScanPhysiNode*)pPhyNode; pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { - STagScanPhysiNode* pScanPhyNode = (STagScanPhysiNode*)pPhyNode; + STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); - int32_t code = createScanTableListInfo(pScanPhyNode, NULL, false, pHandle, pTableListInfo, 
pTagCond, - pTagIndexCond, pTaskInfo); - if (code != TSDB_CODE_SUCCESS) { - pTaskInfo->code = code; - qError("failed to getTableList, code: %s", tstrerror(code)); - return NULL; + if (!pTagScanPhyNode->onlyMetaCtbIdx) { + int32_t code = createScanTableListInfo(pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, + pTagIndexCond, pTaskInfo); + if (code != TSDB_CODE_SUCCESS) { + pTaskInfo->code = code; + qError("failed to getTableList, code: %s", tstrerror(code)); + return NULL; + } } - - pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); + pOperator = createTagScanOperatorInfo(pHandle, pTagScanPhyNode, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN == type) { SBlockDistScanPhysiNode* pBlockNode = (SBlockDistScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 5e0eb71c13..107ea14914 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -3048,10 +3048,13 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi if (code != TSDB_CODE_SUCCESS) { goto _error; } + pInfo->pTagCond = pTagCond; pInfo->pTagIndexCond = pTagIndexCond; - pInfo->pTableListInfo = pTableListInfo; + pInfo->suid = pPhyNode->suid; pInfo->pStorageAPI = &pTaskInfo->storageAPI; + + pInfo->pTableListInfo = pTableListInfo; pInfo->pRes = createDataBlockFromDescNode(pDescNode); pInfo->readHandle = *pReadHandle; pInfo->curPos = 0; @@ -3062,10 +3065,10 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi initResultSizeInfo(&pOperator->resultInfo, 4096); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); + __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? 
doTagScanFromCtbIdx : doTagScan; pOperator->fpSet = - createOperatorFpSet(optrDummyOpenFn, doTagScan, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); + createOperatorFpSet(optrDummyOpenFn, tagScanNextFn, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); - pInfo->suid = pPhyNode->suid; return pOperator; _error: From 84452c8deefc8ca600dcefdff907f6bc4608edcc Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 16:36:53 +0800 Subject: [PATCH 41/81] cos: use static sdk --- cmake/curl_CMakeLists.txt.in | 2 +- source/dnode/vnode/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 5f1efc1e5a..856d42257a 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 684134c2d6..a07e38e53b 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -162,7 +162,7 @@ target_link_libraries( PUBLIC index # s3 - cos_c_sdk + cos_c_sdk_static ${APR_UTIL_LIBRARY} ${APR_LIBRARY} ${MINIXML_LIBRARY} From 6530658815346945aaa333326ab72f54067182c4 Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 11 Aug 2023 17:05:59 +0800 Subject: [PATCH 42/81] fix: continue coding --- source/libs/executor/src/operator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index d0805a86e4..7f0c5baa36 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -373,7 +373,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { - int32_t code = createScanTableListInfo(pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, + int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); if (code != TSDB_CODE_SUCCESS) { pTaskInfo->code = code; From e1971cf0a023e1f8e155a27e0fc1e92e2c56b415 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 17:18:38 +0800 Subject: [PATCH 43/81] curl: disable brotli with static lib --- cmake/curl_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 856d42257a..0fe0c2256f 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install From 
47d2f9ad6df4385e0cb912841204e94cff1d4ed0 Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 11 Aug 2023 17:52:52 +0800 Subject: [PATCH 44/81] fix: first run without tag cond --- source/dnode/vnode/src/meta/metaQuery.c | 4 ++-- source/dnode/vnode/src/vnd/vnodeQuery.c | 6 +++--- source/libs/executor/src/operator.c | 1 + source/libs/executor/src/scanoperator.c | 15 +++++++++++---- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 31c7bc8500..39c3dfa080 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -427,7 +427,7 @@ SMCtbCursor *metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock) { metaRLock(pMeta); } - ret = tdbTbcOpen(pMeta->pCtbIdx, &pCtbCur->pCur, NULL); + ret = tdbTbcOpen(pMeta->pCtbIdx, (TBC**)&pCtbCur->pCur, NULL); if (ret < 0) { metaULock(pMeta); taosMemoryFree(pCtbCur); @@ -1365,7 +1365,7 @@ int32_t metaGetTableTagsByUids(void *pVnode, int64_t suid, SArray *uidList) { } int32_t metaGetTableTags(void *pVnode, uint64_t suid, SArray *pUidTagInfo) { - SMCtbCursor *pCur = metaOpenCtbCursor(((SVnode *)pVnode)->pMeta, suid, 1); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, suid, 1); // If len > 0 means there already have uids, and we only want the // tags of the specified tables, of which uid in the uid list. Otherwise, all table tags are retrieved and kept diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index 51f4cee40c..48f8ec021d 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -440,7 +440,7 @@ int32_t vnodeGetTableList(void* pVnode, int8_t type, SArray* pList) { } int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list) { - SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, uid, 1); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, uid, 1); while (1) { tb_uid_t id = metaCtbCursorNext(pCur); @@ -462,7 +462,7 @@ int32_t vnodeGetCtbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bo int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list) { SVnode *pVnodeObj = pVnode; - SMCtbCursor *pCur = metaOpenCtbCursor(pVnodeObj->pMeta, suid, 1); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnodeObj, suid, 1); while (1) { tb_uid_t id = metaCtbCursorNext(pCur); @@ -521,7 +521,7 @@ int32_t vnodeGetStbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bo } int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) { - SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, suid, 0); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, suid, 0); if (!pCur) { return TSDB_CODE_FAILED; } diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 7f0c5baa36..31998d13b6 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -371,6 +371,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; + pTagScanPhyNode->onlyMetaCtbIdx = true; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, diff --git a/source/libs/executor/src/scanoperator.c 
b/source/libs/executor/src/scanoperator.c index 107ea14914..5f8bf03d80 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2922,7 +2922,8 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { SArray* aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); while (1) { - while (count < pOperator->resultInfo.capacity) { + int32_t numTables = 0; + while (numTables < pOperator->resultInfo.capacity) { SMCtbCursor* pCur = pInfo->pCtbCursor; tb_uid_t uid = pAPI->metaFn.ctbCursorNext(pInfo->pCtbCursor); if (uid == 0) { @@ -2932,14 +2933,19 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { info.pTagVal = taosMemoryMalloc(pCur->vLen); memcpy(info.pTagVal, pCur->pVal, pCur->vLen); taosArrayPush(aUidTags, &info); + ++numTables; } - int32_t numTables = taosArrayGetSize(aUidTags); if (numTables == 0) { break; } - - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + if (pInfo->pTagCond != NULL) { + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + } else { + for (int i = 0; i < numTables; ++i) { + taosArrayPush(aFilterIdxs, &i); + } + } tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, pAPI); count = taosArrayGetSize(aFilterIdxs); @@ -2955,6 +2961,7 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { taosArrayDestroy(aFilterIdxs); taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); + pRes->info.rows = count; pOperator->resultInfo.totalRows += count; return (pRes->info.rows == 0) ? NULL : pInfo->pRes; } From edd2fa4f351c3c3434c2143822b0a188a1c305d1 Mon Sep 17 00:00:00 2001 From: slzhou Date: Sat, 12 Aug 2023 08:17:43 +0800 Subject: [PATCH 45/81] fix: pass compilation and simple test --- source/libs/executor/src/scanoperator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 5f8bf03d80..ac20bae167 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2940,7 +2940,7 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { break; } if (pInfo->pTagCond != NULL) { - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI); } else { for (int i = 0; i < numTables; ++i) { taosArrayPush(aFilterIdxs, &i); From f83bfec067deb4de1ab9e98434094f0d0e20a8cf Mon Sep 17 00:00:00 2001 From: slzhou Date: Sat, 12 Aug 2023 08:28:25 +0800 Subject: [PATCH 46/81] fix: change only meta ctb idx back to false --- source/libs/executor/src/operator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 31998d13b6..abef8298e5 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -371,7 +371,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; - pTagScanPhyNode->onlyMetaCtbIdx = true; + pTagScanPhyNode->onlyMetaCtbIdx = false; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { int32_t code = 
createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, From 6688d70ba41400c83e9686e0ca6565dc861b1327 Mon Sep 17 00:00:00 2001 From: slzhou Date: Sun, 13 Aug 2023 18:46:55 +0800 Subject: [PATCH 47/81] fix: fix planner test error --- source/libs/nodes/src/nodesCodeFuncs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 3540f8cb70..a2de0bc63a 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -6654,7 +6654,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { case QUERY_NODE_LOGIC_PLAN: return logicPlanToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: - return physiTableScanNodeToJson(pObj, pJson); + return physiTagScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: return physiScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: From c97b9249fc4e08667648d28ab577828ff8640dd7 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 14 Aug 2023 14:38:28 +0800 Subject: [PATCH 48/81] cos: only for linux --- cmake/cmake.options | 4 ++ contrib/CMakeLists.txt | 9 +++-- source/common/src/tglobal.c | 2 + source/dnode/vnode/CMakeLists.txt | 56 +++++++++++++++------------ source/dnode/vnode/src/vnd/vnodeCos.c | 14 +++++++ 5 files changed, 57 insertions(+), 28 deletions(-) diff --git a/cmake/cmake.options b/cmake/cmake.options index ea5efcb13a..1d4e9ba515 100644 --- a/cmake/cmake.options +++ b/cmake/cmake.options @@ -125,12 +125,16 @@ option( ON ) +IF(${TD_LINUX}) + option( BUILD_WITH_COS "If build with cos" ON ) +ENDIF () + option( BUILD_WITH_SQLITE "If build with sqlite" diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 452192a288..e3e48ac3a1 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -6,6 +6,8 @@ function(cat IN_FILE OUT_FILE) file(APPEND ${OUT_FILE} "${CONTENTS}") endfunction(cat IN_FILE OUT_FILE) +if(${TD_LINUX}) + set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) @@ -35,6 +37,8 @@ execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . execute_process(COMMAND "${CMAKE_COMMAND}" --build . 
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") +endif(${TD_LINUX}) + set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -388,7 +392,7 @@ endif() # cos if(${BUILD_WITH_COS}) - if(NOT ${TD_WINDOWS}) + if(${TD_LINUX}) set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) @@ -406,10 +410,9 @@ if(${BUILD_WITH_COS}) ) set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME}) - else() - endif(NOT ${TD_WINDOWS}) + endif(${TD_LINUX}) endif(${BUILD_WITH_COS}) # lucene diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 2447e02698..3595347db3 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -292,7 +292,9 @@ int32_t taosSetS3Cfg(SConfig *pCfg) { } } if (tsS3BucketName[0] != '<' && tsDiskCfgNum > 1) { +#ifdef USE_COS tsS3Enabled = true; +#endif } return 0; diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index a07e38e53b..052b6be37f 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,12 +135,6 @@ else() endif() endif() -set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") -find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) -find_library(MINIXML_LIBRARY mxml) -find_library(CURL_LIBRARY curl) - target_link_libraries( vnode PUBLIC os @@ -160,28 +154,24 @@ target_link_libraries( PUBLIC transport PUBLIC stream PUBLIC index - - # s3 - cos_c_sdk_static - ${APR_UTIL_LIBRARY} - ${APR_LIBRARY} - ${MINIXML_LIBRARY} - ${CURL_LIBRARY} ) -IF (TD_GRANT) - TARGET_LINK_LIBRARIES(vnode PUBLIC grant) -ENDIF () +if(${TD_LINUX}) +set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) +target_link_libraries( + vnode -target_compile_definitions(vnode PUBLIC -DMETA_REFACT) - -if(${BUILD_WITH_INVERTEDINDEX}) - add_definitions(-DUSE_INVERTED_INDEX) -endif(${BUILD_WITH_INVERTEDINDEX}) - -if(${BUILD_WITH_ROCKSDB}) - add_definitions(-DUSE_ROCKSDB) -endif(${BUILD_WITH_ROCKSDB}) + # s3 + PUBLIC cos_c_sdk_static + PUBLIC ${APR_UTIL_LIBRARY} + PUBLIC ${APR_LIBRARY} + PUBLIC ${MINIXML_LIBRARY} + PUBLIC ${CURL_LIBRARY} +) # s3 FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) @@ -199,6 +189,22 @@ target_include_directories( PUBLIC "$ENV{HOME}/.cos-local.1/include" ) +endif(${TD_LINUX}) + +IF (TD_GRANT) + TARGET_LINK_LIBRARIES(vnode PUBLIC grant) +ENDIF () + +target_compile_definitions(vnode PUBLIC -DMETA_REFACT) + +if(${BUILD_WITH_INVERTEDINDEX}) + add_definitions(-DUSE_INVERTED_INDEX) +endif(${BUILD_WITH_INVERTEDINDEX}) + +if(${BUILD_WITH_ROCKSDB}) + add_definitions(-DUSE_ROCKSDB) +endif(${BUILD_WITH_ROCKSDB}) + if(${BUILD_TEST}) add_subdirectory(test) endif(${BUILD_TEST}) diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index a40e046972..52d5fc2b1b 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -12,6 +12,7 @@ extern char tsS3AccessKeySecret[]; extern char tsS3BucketName[]; extern char tsS3AppId[]; +#ifdef USE_COS int32_t s3Init() { if (cos_http_io_initialize(NULL, 0) != COSE_OK) { return -1; @@ -294,3 +295,16 @@ 
long s3Size(const char *object_name) { return size; } + +#else + +int32_t s3Init() { return 0; } +void s3CleanUp() {} +void s3PutObjectFromFile(const char *file, const char *object) {} +void s3DeleteObjects(const char *object_name[], int nobject) {} +bool s3Exists(const char *object_name) { return false; } +bool s3Get(const char *object_name, const char *path) { return false; } +void s3EvictCache(const char *path, long object_size) {} +long s3Size(const char *object_name) { return 0; } + +#endif From 989abc2bf6264ea6c2837c3daa44027581405807 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 14 Aug 2023 15:03:17 +0800 Subject: [PATCH 49/81] vnode/cos: move includes into USE_COS --- source/dnode/vnode/src/vnd/vnodeCos.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index 52d5fc2b1b..b28b7ad747 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -2,10 +2,6 @@ #include "vndCos.h" -#include "cos_api.h" -#include "cos_http_io.h" -#include "cos_log.h" - extern char tsS3Endpoint[]; extern char tsS3AccessKeyId[]; extern char tsS3AccessKeySecret[]; @@ -13,6 +9,10 @@ extern char tsS3BucketName[]; extern char tsS3AppId[]; #ifdef USE_COS +#include "cos_api.h" +#include "cos_http_io.h" +#include "cos_log.h" + int32_t s3Init() { if (cos_http_io_initialize(NULL, 0) != COSE_OK) { return -1; From 5c276fa547c0b5ba9a7a7332968ecc89f2b51105 Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Fri, 11 Aug 2023 17:15:17 +0800 Subject: [PATCH 50/81] fix: make kill query work for sysscanoperator --- source/libs/executor/inc/querytask.h | 2 +- source/libs/executor/inc/tsort.h | 3 ++- source/libs/executor/src/querytask.c | 2 +- source/libs/executor/src/scanoperator.c | 5 +++-- source/libs/executor/src/sysscanoperator.c | 5 +++++ source/libs/executor/src/tsort.c | 13 ++++++++++--- 6 files changed, 22 insertions(+), 8 deletions(-) diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h index 7241b015a0..0742b9ba4c 100644 --- a/source/libs/executor/inc/querytask.h +++ b/source/libs/executor/inc/querytask.h @@ -99,7 +99,7 @@ struct SExecTaskInfo { void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst); SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model, SStorageAPI* pAPI); void doDestroyTask(SExecTaskInfo* pTaskInfo); -bool isTaskKilled(SExecTaskInfo* pTaskInfo); +bool isTaskKilled(void* pTaskInfo); void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode); void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status); int32_t createExecTaskInfo(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId, diff --git a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h index 57c8bce275..3180173ca7 100644 --- a/source/libs/executor/inc/tsort.h +++ b/source/libs/executor/inc/tsort.h @@ -191,7 +191,8 @@ int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols); bool tsortIsClosed(SSortHandle* pHandle); void tsortSetClosed(SSortHandle* pHandle); -void setSingleTableMerge(SSortHandle* pHandle); +void tsortSetSingleTableMerge(SSortHandle* pHandle); +void tsortSetAbortCheckFn(SSortHandle* pHandle, bool (*checkFn)(void* param), void* param); #ifdef __cplusplus } diff --git a/source/libs/executor/src/querytask.c b/source/libs/executor/src/querytask.c index 22d171e74a..980ef1a61a 100644 --- 
a/source/libs/executor/src/querytask.c +++ b/source/libs/executor/src/querytask.c @@ -59,7 +59,7 @@ SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOP return pTaskInfo; } -bool isTaskKilled(SExecTaskInfo* pTaskInfo) { return (0 != pTaskInfo->code); } +bool isTaskKilled(void* pTaskInfo) { return (0 != ((SExecTaskInfo*)pTaskInfo)->code); } void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode) { pTaskInfo->code = rspCode; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 71b0747be8..a3c5a76a7f 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2928,8 +2928,9 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_BLOCK_TS_MERGE, pInfo->bufPageSize, numOfBufPage, pInfo->pSortInputBlock, pTaskInfo->id.str, 0, 0, 0); - + tsortSetMergeLimit(pInfo->pSortHandle, mergeLimit); + tsortSetAbortCheckFn(pInfo->pSortHandle, isTaskKilled, pOperator->pTaskInfo); } tsortSetFetchRawDataFp(pInfo->pSortHandle, getBlockForTableMergeScan, NULL, NULL); @@ -2949,7 +2950,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { int32_t code = TSDB_CODE_SUCCESS; if (numOfTable == 1) { - setSingleTableMerge(pInfo->pSortHandle); + tsortSetSingleTableMerge(pInfo->pSortHandle); } else { code = tsortOpen(pInfo->pSortHandle); } diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index a1f83dda2f..9048dd43d7 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -1601,6 +1601,11 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { SSysTableScanInfo* pInfo = pOperator->info; char dbName[TSDB_DB_NAME_LEN] = {0}; + if (isTaskKilled(pOperator->pTaskInfo)) { + setOperatorCompleted(pOperator); + return NULL; + } + blockDataCleanup(pInfo->pRes); const char* name = tNameGetTableName(&pInfo->name); diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 1891e93c61..6c4a780dfb 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -71,12 +71,20 @@ struct SSortHandle { SMultiwayMergeTreeInfo* pMergeTree; bool singleTableMerge; + + bool (*abortCheckFn)(void* param); + void* abortCheckParam; }; -void setSingleTableMerge(SSortHandle* pHandle) { +void tsortSetSingleTableMerge(SSortHandle* pHandle) { pHandle->singleTableMerge = true; } +void tsortSetAbortCheckFn(SSortHandle *pHandle, bool (*checkFn)(void *), void* param) { + pHandle->abortCheckFn = checkFn; + pHandle->abortCheckParam = param; +} + static int32_t msortComparFn(const void* pLeft, const void* pRight, void* param); // | offset[0] | offset[1] |....| nullbitmap | data |...| @@ -726,11 +734,10 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t)); while (1) { - if (tsortIsClosed(pHandle)) { + if (tsortIsClosed(pHandle) || (pHandle->abortCheckFn && pHandle->abortCheckFn(pHandle->abortCheckParam))) { code = terrno = TSDB_CODE_TSC_QUERY_CANCELLED; return code; } - SSDataBlock* pDataBlock = getSortedBlockDataInner(pHandle, &pHandle->cmpParam, numOfRows); if (pDataBlock == NULL) { break; From 57d1957dee1d8a738dfe0f2fe3efd9716433a280 Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 14 Aug 2023 15:57:27 +0800 Subject: [PATCH 51/81] enhance: tag scan code refactoring 
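This refactor keeps the per-batch arrays (aUidTags, aFilterIdxs) in STagScanInfo so they are reused across calls and released in destroyTagScanOperatorInfo(), and folds result filling into a single tagScanFillResultBlock() that walks either the filtered index list or, when no tag condition is present, every collected row (ignoreFilterIdx). Below is a minimal standalone sketch of that collect / optionally-filter / fill pattern; it uses simplified stand-in types and a hypothetical tag condition instead of the real SArray/storage API, and is not part of the diff that follows.

/* Standalone sketch only: stand-in names, not TDengine code. */
#include <stdio.h>
#include <stdbool.h>

#define CAPACITY 4

typedef struct {
  unsigned long uid;
  int           tagVal;  /* stands in for the serialized tag blob */
} UidTagInfo;

/* stand-in for tagScanFilterByTagCond(): collect indexes of rows passing the tag condition */
static int filterByTagCond(const UidTagInfo *rows, int n, int *filterIdxs) {
  int k = 0;
  for (int i = 0; i < n; ++i) {
    if (rows[i].tagVal > 10) {  /* hypothetical tag condition */
      filterIdxs[k++] = i;
    }
  }
  return k;
}

/* stand-in for tagScanFillResultBlock(): fill from filtered indexes, or from all rows */
static void fillResultBlock(const UidTagInfo *rows, int n, const int *filterIdxs,
                            int nFiltered, bool ignoreFilterIdx) {
  if (ignoreFilterIdx) {
    for (int i = 0; i < n; ++i) printf("row uid=%lu\n", rows[i].uid);
  } else {
    for (int i = 0; i < nFiltered; ++i) printf("row uid=%lu\n", rows[filterIdxs[i]].uid);
  }
}

int main(void) {
  /* one "cursor batch" of child tables; the real scan loops until the cursor is exhausted */
  UidTagInfo batch[CAPACITY] = {{1, 5}, {2, 20}, {3, 7}, {4, 30}};
  int        filterIdxs[CAPACITY];
  bool       hasTagCond = true;              /* i.e. pInfo->pTagCond != NULL */
  bool       ignoreFilterIdx = !hasTagCond;  /* no condition: emit every collected row */

  int nFiltered = ignoreFilterIdx ? 0 : filterByTagCond(batch, CAPACITY, filterIdxs);
  fillResultBlock(batch, CAPACITY, filterIdxs, nFiltered, ignoreFilterIdx);
  return 0;
}
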
--- source/libs/executor/inc/executorInt.h | 2 + source/libs/executor/src/scanoperator.c | 110 +++++++++--------------- 2 files changed, 43 insertions(+), 69 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 2b25feabb3..cb066d809c 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -263,6 +263,8 @@ typedef struct STagScanInfo { void* pCtbCursor; SNode* pTagCond; SNode* pTagIndexCond; + SArray* aUidTags; // SArray + SArray* aFilterIdxs; // SArray SStorageAPI* pStorageAPI; } STagScanInfo; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ac20bae167..71352b1c6e 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2813,7 +2813,7 @@ static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo // } else { // name is not retrieved during filter // pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); // } - STR_TO_VARSTR(str, "zsl"); + STR_TO_VARSTR(str, "ctbidx"); colDataSetVal(pColInfo, rowIndex, str, false); } else { @@ -2841,66 +2841,32 @@ static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo } } -static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aFilterIdxs, +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aFilterIdxs, bool ignoreFilterIdx, SStorageAPI* pAPI) { STagScanInfo* pInfo = pOperator->info; SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; - - size_t szTables = taosArrayGetSize(aFilterIdxs); - for (int i = 0; i < szTables; ++i) { - int32_t idx = *(int32_t*)taosArrayGet(aFilterIdxs, i); - STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, idx); - for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { - SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); - tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); + if (!ignoreFilterIdx) { + size_t szTables = taosArrayGetSize(aFilterIdxs); + for (int i = 0; i < szTables; ++i) { + int32_t idx = *(int32_t*)taosArrayGet(aFilterIdxs, i); + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, idx); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); + } } - } - return 0; -} - -#if 0 -static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, - SStorageAPI* pAPI) { - STagScanInfo* pInfo = pOperator->info; - SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; - - int32_t nTbls = taosArrayGetSize(aUidTags); - for (int i = 0; i < nTbls; ++i) { - STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, i); - for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { - SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); - - // refactor later - if (fmIsScanPseudoColumnFunc(pExprInfo[j].pExpr->_function.functionId)) { - char str[512]; - - STR_TO_VARSTR(str, "zsl"); - colDataSetVal(pDst, (i), str, false); - } else { // it is a tag value - STagVal val = {0}; - val.cid = pExprInfo[j].base.pParam[0].pCol->colId; - const char* p = pAPI->metaFn.extractTagVal(pUidTagInfo->pTagVal, pDst->info.type, 
&val); - - char* data = NULL; - if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) { - data = tTagValToData((const STagVal*)p, false); - } else { - data = (char*)p; - } - colDataSetVal(pDst, i, data, - (data == NULL) || (pDst->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data))); - - if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL && IS_VAR_DATA_TYPE(((const STagVal*)p)->type) && - data != NULL) { - taosMemoryFree(data); - } + } else { + size_t szTables = taosArrayGetSize(aUidTags); + for (int i = 0; i < szTables; ++i) { + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, i); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); } } } return 0; } -#endif - static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { @@ -2912,16 +2878,19 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { STagScanInfo* pInfo = pOperator->info; SSDataBlock* pRes = pInfo->pRes; blockDataCleanup(pRes); - int32_t count = 0; if (pInfo->pCtbCursor == NULL) { pInfo->pCtbCursor = pAPI->metaFn.openCtbCursor(pInfo->readHandle.vnode, pInfo->suid, 1); } - SArray* aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); - SArray* aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + SArray* aUidTags = pInfo->aUidTags; + SArray* aFilterIdxs = pInfo->aFilterIdxs; + int32_t count = 0; while (1) { + taosArrayClearEx(aUidTags, tagScanFreeUidTag); + taosArrayClear(aFilterIdxs); + int32_t numTables = 0; while (numTables < pOperator->resultInfo.capacity) { SMCtbCursor* pCur = pInfo->pCtbCursor; @@ -2939,34 +2908,29 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { if (numTables == 0) { break; } + bool ignoreFilterIdx = true; if (pInfo->pTagCond != NULL) { + ignoreFilterIdx = false; tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI); } else { - for (int i = 0; i < numTables; ++i) { - taosArrayPush(aFilterIdxs, &i); - } + ignoreFilterIdx = true; } - tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, pAPI); - count = taosArrayGetSize(aFilterIdxs); + tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, ignoreFilterIdx, pAPI); + + count = ignoreFilterIdx ? taosArrayGetSize(aUidTags): taosArrayGetSize(aFilterIdxs); if (count != 0) { break; } - - taosArrayClearEx(aUidTags, tagScanFreeUidTag); - taosArrayClear(aFilterIdxs); } - - taosArrayDestroy(aFilterIdxs); - taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); - + pRes->info.rows = count; pOperator->resultInfo.totalRows += count; return (pRes->info.rows == 0) ? 
NULL : pInfo->pRes; } -static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { +static SSDataBlock* doTagScanFromMetaEntry(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; } @@ -3027,6 +2991,10 @@ static void destroyTagScanOperatorInfo(void* param) { if (pInfo->pCtbCursor != NULL) { pInfo->pStorageAPI->metaFn.closeCtbCursor(pInfo->pCtbCursor, 1); } + + taosArrayDestroy(pInfo->aFilterIdxs); + taosArrayDestroyEx(pInfo->aUidTags, tagScanFreeUidTag); + pInfo->pRes = blockDataDestroy(pInfo->pRes); taosArrayDestroy(pInfo->matchInfo.pList); pInfo->pTableListInfo = tableListDestroy(pInfo->pTableListInfo); @@ -3072,7 +3040,11 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi initResultSizeInfo(&pOperator->resultInfo, 4096); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); - __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? doTagScanFromCtbIdx : doTagScan; + if (pPhyNode->onlyMetaCtbIdx) { + pInfo->aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); + pInfo->aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + } + __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? doTagScanFromCtbIdx : doTagScanFromMetaEntry; pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, tagScanNextFn, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); From cf9f9ab4718c8420c72ea5767e097ba3c0b1c19c Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 14 Aug 2023 08:01:20 +0000 Subject: [PATCH 52/81] rebuild index at tag0 --- source/dnode/mnode/impl/src/mndIndex.c | 70 ++++++-- source/dnode/vnode/src/meta/metaQuery.c | 14 +- source/dnode/vnode/src/meta/metaTable.c | 9 +- source/libs/parser/src/parTranslater.c | 229 +++++++++++++----------- 4 files changed, 191 insertions(+), 131 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndIndex.c b/source/dnode/mnode/impl/src/mndIndex.c index 2157804559..b56ea320cc 100644 --- a/source/dnode/mnode/impl/src/mndIndex.c +++ b/source/dnode/mnode/impl/src/mndIndex.c @@ -79,9 +79,12 @@ int32_t mndInitIdx(SMnode *pMnode) { return sdbSetTable(pMnode->pSdb, table); } -static int32_t mndFindSuperTableTagId(const SStbObj *pStb, const char *tagName) { +static int32_t mndFindSuperTableTagId(const SStbObj *pStb, const char *tagName, int8_t *hasIdx) { for (int32_t tag = 0; tag < pStb->numOfTags; tag++) { if (strcasecmp(pStb->pTags[tag].name, tagName) == 0) { + if (IS_IDX_ON(&pStb->pTags[tag])) { + *hasIdx = 1; + } return tag; } } @@ -597,7 +600,8 @@ static int32_t mndSetUpdateIdxStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb pNew->updateTime = taosGetTimestampMs(); pNew->lock = 0; - int32_t tag = mndFindSuperTableTagId(pOld, tagName); + int8_t hasIdx = 0; + int32_t tag = mndFindSuperTableTagId(pOld, tagName, &hasIdx); if (tag < 0) { terrno = TSDB_CODE_MND_TAG_NOT_EXIST; return -1; @@ -612,14 +616,14 @@ static int32_t mndSetUpdateIdxStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb SSchema *pTag = pNew->pTags + tag; if (on == 1) { - if (IS_IDX_ON(pTag)) { + if (hasIdx && tag != 0) { terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; return -1; } else { SSCHMEA_SET_IDX_ON(pTag); } } else { - if (!IS_IDX_ON(pTag)) { + if (hasIdx == 0) { terrno = TSDB_CODE_MND_SMA_NOT_EXIST; } else { SSCHMEA_SET_IDX_OFF(pTag); @@ -667,7 +671,42 @@ _OVER: mndTransDrop(pTrans); return code; } +int8_t mndCheckIndexNameByTagName(SMnode *pMnode, SIdxObj *pIdxObj) { + // build index on first tag, and no index name; + int8_t exist = 0; + SDbObj *pDb = NULL; 
+ if (strlen(pIdxObj->db) > 0) { + pDb = mndAcquireDb(pMnode, pIdxObj->db); + if (pDb == NULL) return 0; + } + SSmaAndTagIter *pIter = NULL; + SIdxObj *pIdx = NULL; + SSdb *pSdb = pMnode->pSdb; + while (1) { + pIter = sdbFetch(pSdb, SDB_IDX, pIter, (void **)&pIdx); + if (pIter == NULL) break; + + if (NULL != pDb && pIdx->dbUid != pDb->uid) { + sdbRelease(pSdb, pIdx); + continue; + } + if (pIdxObj->stbUid != pIdx->stbUid) { + sdbRelease(pSdb, pIdx); + continue; + } + if (strncmp(pIdxObj->colName, pIdx->colName, TSDB_COL_NAME_LEN) == 0) { + sdbRelease(pSdb, pIdx); + sdbCancelFetch(pSdb, pIdx); + exist = 1; + break; + } + sdbRelease(pSdb, pIdx); + } + + mndReleaseDb(pMnode, pDb); + return exist; +} static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *req, SDbObj *pDb, SStbObj *pStb) { int32_t code = -1; SIdxObj idxObj = {0}; @@ -681,11 +720,20 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *re idxObj.stbUid = pStb->uid; idxObj.dbUid = pStb->dbUid; - int32_t tag = mndFindSuperTableTagId(pStb, req->colName); + int8_t hasIdx = 0; + int32_t tag = mndFindSuperTableTagId(pStb, req->colName, &hasIdx); if (tag < 0) { terrno = TSDB_CODE_MND_TAG_NOT_EXIST; return -1; - } else if (tag == 0) { + } + int8_t exist = 0; + if (tag == 0 && hasIdx == 1) { + exist = mndCheckIndexNameByTagName(pMnode, &idxObj); + if (exist) { + terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; + return -1; + } + } else if (hasIdx == 1) { terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; return -1; } @@ -695,11 +743,11 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *re return -1; } - SSchema *pTag = pStb->pTags + tag; - if (IS_IDX_ON(pTag)) { - terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; - return -1; - } + // SSchema *pTag = pStb->pTags + tag; + // if (IS_IDX_ON(pTag)) { + // terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; + // return -1; + // } code = mndAddIndexImpl(pMnode, pReq, pDb, pStb, &idxObj); return code; diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index c26bb45c2b..389994ce1d 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -17,8 +17,8 @@ #include "osMemory.h" #include "tencode.h" -void _metaReaderInit(SMetaReader* pReader, void* pVnode, int32_t flags, SStoreMeta* pAPI) { - SMeta* pMeta = ((SVnode*)pVnode)->pMeta; +void _metaReaderInit(SMetaReader *pReader, void *pVnode, int32_t flags, SStoreMeta *pAPI) { + SMeta *pMeta = ((SVnode *)pVnode)->pMeta; metaReaderDoInit(pReader, pMeta, flags); pReader->pAPI = pAPI; } @@ -143,7 +143,7 @@ tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) { int metaGetTableNameByUid(void *pVnode, uint64_t uid, char *tbName) { int code = 0; SMetaReader mr = {0}; - metaReaderDoInit(&mr, ((SVnode*)pVnode)->pMeta, 0); + metaReaderDoInit(&mr, ((SVnode *)pVnode)->pMeta, 0); code = metaReaderGetTableEntryByUid(&mr, uid); if (code < 0) { metaReaderClear(&mr); @@ -195,7 +195,7 @@ int metaGetTableUidByName(void *pVnode, char *tbName, uint64_t *uid) { int metaGetTableTypeByName(void *pVnode, char *tbName, ETableType *tbType) { int code = 0; SMetaReader mr = {0}; - metaReaderDoInit(&mr, ((SVnode*)pVnode)->pMeta, 0); + metaReaderDoInit(&mr, ((SVnode *)pVnode)->pMeta, 0); code = metaGetTableEntryByName(&mr, tbName); if (code == 0) *tbType = mr.me.type; @@ -244,7 +244,7 @@ SMTbCursor *metaOpenTbCursor(void *pVnode) { return NULL; } - SVnode* pVnodeObj = pVnode; + SVnode *pVnodeObj = pVnode; // 
tdbTbcMoveToFirst((TBC *)pTbCur->pDbc); pTbCur->pMeta = pVnodeObj->pMeta; pTbCur->paused = 1; @@ -1139,7 +1139,7 @@ int32_t metaFilterTtl(void *pVnode, SMetaFltParam *arg, SArray *pUids) { pCursor->type = param->type; metaRLock(pMeta); - //ret = tdbTbcOpen(pMeta->pTtlIdx, &pCursor->pCur, NULL); + // ret = tdbTbcOpen(pMeta->pTtlIdx, &pCursor->pCur, NULL); END: if (pCursor->pMeta) metaULock(pCursor->pMeta); @@ -1194,7 +1194,7 @@ int32_t metaFilterTableIds(void *pVnode, SMetaFltParam *arg, SArray *pUids) { ret = -1; for (int i = 0; i < oStbEntry.stbEntry.schemaTag.nCols; i++) { SSchema *schema = oStbEntry.stbEntry.schemaTag.pSchema + i; - if (schema->colId == param->cid && param->type == schema->type && (IS_IDX_ON(schema) || i == 0)) { + if (schema->colId == param->cid && param->type == schema->type && (IS_IDX_ON(schema))) { ret = 0; } } diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 632e6dd872..f56837f759 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -450,12 +450,13 @@ int metaAddIndexToSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { goto _err; } if (IS_IDX_ON(pNew) && !IS_IDX_ON(pOld)) { - if (diffIdx != -1) goto _err; + // if (diffIdx != -1) goto _err; diffIdx = i; + break; } } - if (diffIdx == -1 || diffIdx == 0) { + if (diffIdx == -1) { goto _err; } @@ -586,7 +587,7 @@ int metaDropIndexFromSTable(SMeta *pMeta, int64_t version, SDropIndexReq *pReq) for (int i = 0; i < oStbEntry.stbEntry.schemaTag.nCols; i++) { SSchema *schema = oStbEntry.stbEntry.schemaTag.pSchema + i; if (0 == strncmp(schema->name, pReq->colName, sizeof(pReq->colName))) { - if (i != 0 || IS_IDX_ON(schema)) { + if (IS_IDX_ON(schema)) { pCol = schema; } break; @@ -2094,7 +2095,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { } else { for (int i = 0; i < pTagSchema->nCols; i++) { pTagColumn = &pTagSchema->pSchema[i]; - if (i != 0 && !IS_IDX_ON(pTagColumn)) continue; + if (!IS_IDX_ON(pTagColumn)) continue; STagVal tagVal = {.cid = pTagColumn->colId}; tTagGet((const STag *)pCtbEntry->ctbEntry.pTags, &tagVal); diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 38118c03f8..d2dc1f3320 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -31,8 +31,8 @@ #define SYSTABLE_SHOW_TYPE_OFFSET QUERY_NODE_SHOW_DNODES_STMT typedef struct SRewriteTbNameContext { - int32_t errCode; - char* pTbName; + int32_t errCode; + char* pTbName; } SRewriteTbNameContext; typedef struct STranslateContext { @@ -54,7 +54,7 @@ typedef struct STranslateContext { bool stableQuery; bool showRewrite; SNode* pPrevRoot; - SNode* pPostRoot; + SNode* pPostRoot; } STranslateContext; typedef struct SBuildTopicContext { @@ -278,10 +278,11 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode); static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode); static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal); -static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt); -static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery); -static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery); -static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery); +static int32_t 
createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt); +static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery); +static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery); +static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery); static bool afterGroupBy(ESqlClause clause) { return clause > SQL_CLAUSE_GROUP_BY; } @@ -772,7 +773,8 @@ static SNodeList* getProjectList(const SNode* pNode) { static bool isTimeLineQuery(SNode* pStmt) { if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { - return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode); + return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || + (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode); } else if (QUERY_NODE_SET_OPERATOR == nodeType(pStmt)) { return TIME_LINE_GLOBAL == ((SSetOperator*)pStmt)->timeLineResMode; } else { @@ -791,7 +793,7 @@ static bool isGlobalTimeLineQuery(SNode* pStmt) { } static bool isTimeLineAlignedQuery(SNode* pStmt) { - SSelectStmt *pSelect = (SSelectStmt *)pStmt; + SSelectStmt* pSelect = (SSelectStmt*)pStmt; if (isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return true; } @@ -801,7 +803,7 @@ static bool isTimeLineAlignedQuery(SNode* pStmt) { if (QUERY_NODE_SELECT_STMT != nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return false; } - SSelectStmt *pSub = (SSelectStmt *)((STempTableNode*)pSelect->pFromTable)->pSubquery; + SSelectStmt* pSub = (SSelectStmt*)((STempTableNode*)pSelect->pFromTable)->pSubquery; if (nodesListMatch(pSelect->pPartitionByList, pSub->pPartitionByList)) { return true; } @@ -822,18 +824,18 @@ static bool isPrimaryKeyImpl(SNode* pExpr) { return true; } } else if (QUERY_NODE_OPERATOR == nodeType(pExpr)) { - SOperatorNode* pOper = (SOperatorNode*)pExpr; - if (OP_TYPE_ADD != pOper->opType && OP_TYPE_SUB != pOper->opType) { - return false; - } - if (!isPrimaryKeyImpl(pOper->pLeft)) { - return false; - } - if (QUERY_NODE_VALUE != nodeType(pOper->pRight)) { - return false; - } - return true; + SOperatorNode* pOper = (SOperatorNode*)pExpr; + if (OP_TYPE_ADD != pOper->opType && OP_TYPE_SUB != pOper->opType) { + return false; } + if (!isPrimaryKeyImpl(pOper->pLeft)) { + return false; + } + if (QUERY_NODE_VALUE != nodeType(pOper->pRight)) { + return false; + } + return true; + } return false; } @@ -860,7 +862,7 @@ static void setColumnInfoBySchema(const SRealTableNode* pTable, const SSchema* p pCol->tableType = pTable->pMeta->tableType; pCol->colId = pColSchema->colId; pCol->colType = (tagFlag >= 0 ? 
COLUMN_TYPE_TAG : COLUMN_TYPE_COLUMN); - pCol->hasIndex = ((0 == tagFlag) || (pColSchema != NULL && IS_IDX_ON(pColSchema))); + pCol->hasIndex = (pColSchema != NULL && IS_IDX_ON(pColSchema)); pCol->node.resType.type = pColSchema->type; pCol->node.resType.bytes = pColSchema->bytes; if (TSDB_DATA_TYPE_TIMESTAMP == pCol->node.resType.type) { @@ -1406,7 +1408,7 @@ static bool isCountStar(SFunctionNode* pFunc) { } static int32_t rewriteCountStarAsCount1(STranslateContext* pCxt, SFunctionNode* pCount) { - int32_t code = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); if (NULL == pVal) { return TSDB_CODE_OUT_OF_MEMORY; @@ -1608,9 +1610,11 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } - if (pSelect->hasInterpFunc && (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { + if (pSelect->hasInterpFunc && + (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, - "%s ignoring null value options cannot be used when applying to multiple columns", pFunc->functionName); + "%s ignoring null value options cannot be used when applying to multiple columns", + pFunc->functionName); } if (NULL != pSelect->pWindow || NULL != pSelect->pGroupByList) { @@ -1648,7 +1652,8 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu } SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && - !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && !isTimeLineAlignedQuery(pCxt->pCurrStmt)) { + !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && + !isTimeLineAlignedQuery(pCxt->pCurrStmt)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, "%s function requires valid time series input", pFunc->functionName); } @@ -1718,8 +1723,8 @@ static int32_t translateForbidSysTableFunc(STranslateContext* pCxt, SFunctionNod return TSDB_CODE_SUCCESS; } - SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; - SNode* pTable = pSelect->pFromTable; + SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; + SNode* pTable = pSelect->pFromTable; if (NULL != pTable && QUERY_NODE_REAL_TABLE == nodeType(pTable) && TSDB_SYSTEM_TABLE == ((SRealTableNode*)pTable)->pMeta->tableType) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, pFunc->functionName); @@ -2308,7 +2313,8 @@ static EDealRes doCheckExprForGroupBy(SNode** pNode, void* pContext) { } } if (isScanPseudoColumnFunc(*pNode) || QUERY_NODE_COLUMN == nodeType(*pNode)) { - if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { + if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || + (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { return generateDealNodeErrMsg(pCxt, getGroupByErrorCode(pCxt), ((SExprNode*)(*pNode))->userAlias); } else { return rewriteColToSelectValFunc(pCxt, pNode); @@ -2403,14 +2409,14 @@ static int32_t checkHavingGroupBy(STranslateContext* pCxt, SSelectStmt* pSelect) if (NULL != pSelect->pHaving) { code = 
checkExprForGroupBy(pCxt, &pSelect->pHaving); } -/* - if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pProjectionList) { - code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); - } - if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { - code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pOrderByList); - } -*/ + /* + if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pProjectionList) { + code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); + } + if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { + code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pOrderByList); + } + */ return code; } @@ -2669,10 +2675,10 @@ static int32_t setTableCacheLastMode(STranslateContext* pCxt, SSelectStmt* pSele static EDealRes doTranslateTbName(SNode** pNode, void* pContext) { switch (nodeType(*pNode)) { case QUERY_NODE_FUNCTION: { - SFunctionNode *pFunc = (SFunctionNode *)*pNode; + SFunctionNode* pFunc = (SFunctionNode*)*pNode; if (FUNCTION_TYPE_TBNAME == pFunc->funcType) { - SRewriteTbNameContext *pCxt = (SRewriteTbNameContext*)pContext; - SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); + SRewriteTbNameContext* pCxt = (SRewriteTbNameContext*)pContext; + SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); if (NULL == pVal) { pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; return DEAL_RES_ERROR; @@ -2711,11 +2717,12 @@ static int32_t replaceTbName(STranslateContext* pCxt, SSelectStmt* pSelect) { } SRealTableNode* pTable = (SRealTableNode*)pSelect->pFromTable; - if (TSDB_CHILD_TABLE != pTable->pMeta->tableType && TSDB_NORMAL_TABLE != pTable->pMeta->tableType && TSDB_SYSTEM_TABLE != pTable->pMeta->tableType) { + if (TSDB_CHILD_TABLE != pTable->pMeta->tableType && TSDB_NORMAL_TABLE != pTable->pMeta->tableType && + TSDB_SYSTEM_TABLE != pTable->pMeta->tableType) { return TSDB_CODE_SUCCESS; } - SNode** pNode = NULL; + SNode** pNode = NULL; SRewriteTbNameContext pRewriteCxt = {0}; pRewriteCxt.pTbName = pTable->table.tableName; @@ -3122,7 +3129,8 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList code = scalarCalculateConstants(pCastFunc, &pCell->pNode); } if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) { - code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant"); + code = + generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant"); } else if (TSDB_CODE_SUCCESS != code) { code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch"); } @@ -3588,7 +3596,6 @@ static int32_t createDefaultEveryNode(STranslateContext* pCxt, SNode** pOutput) pEvery->isDuration = true; pEvery->literal = taosStrdup("1s"); - *pOutput = (SNode*)pEvery; return TSDB_CODE_SUCCESS; } @@ -3683,15 +3690,15 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) { static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelect) { pCxt->currClause = SQL_CLAUSE_PARTITION_BY; int32_t code = TSDB_CODE_SUCCESS; - + if (pSelect->pPartitionByList) { int8_t typeType = getTableTypeFromTableNode(pSelect->pFromTable); SNode* pPar = nodesListGetNode(pSelect->pPartitionByList, 0); - if (!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) && - 1 == pSelect->pPartitionByList->length && (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { + if 
(!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) && 1 == pSelect->pPartitionByList->length && + (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { pSelect->timeLineResMode = TIME_LINE_MULTI; } - + code = translateExprList(pCxt, pSelect->pPartitionByList); } if (TSDB_CODE_SUCCESS == code) { @@ -3955,9 +3962,9 @@ static int32_t translateSetOperProject(STranslateContext* pCxt, SSetOperator* pS } snprintf(pRightExpr->aliasName, sizeof(pRightExpr->aliasName), "%s", pLeftExpr->aliasName); SNode* pProj = createSetOperProject(pSetOperator->stmtName, pLeft); - if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight) - && ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID - && ((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { + if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight) && + ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID && + ((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { ((SColumnNode*)pProj)->colId = PRIMARYKEY_TIMESTAMP_COL_ID; } if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pSetOperator->pProjectionList, pProj)) { @@ -5737,7 +5744,6 @@ static int32_t translateRestoreDnode(STranslateContext* pCxt, SRestoreComponentN return buildCmdMsg(pCxt, TDMT_MND_RESTORE_DNODE, (FSerializeFunc)tSerializeSRestoreDnodeReq, &restoreReq); } - static int32_t getSmaIndexDstVgId(STranslateContext* pCxt, const char* pDbName, const char* pTableName, int32_t* pVgId) { SVgroupInfo vg = {0}; @@ -5865,7 +5871,7 @@ static int32_t checkCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pS } static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) { - int32_t code = checkCreateSmaIndex(pCxt, pStmt); + int32_t code = checkCreateSmaIndex(pCxt, pStmt); pStmt->pReq = taosMemoryCalloc(1, sizeof(SMCreateSmaReq)); if (pStmt->pReq == NULL) code = TSDB_CODE_OUT_OF_MEMORY; if (TSDB_CODE_SUCCESS == code) { @@ -5879,13 +5885,15 @@ int32_t createIntervalFromCreateSmaIndexStmt(SCreateIndexStmt* pStmt, SInterval* pInterval->interval = ((SValueNode*)pStmt->pOptions->pInterval)->datum.i; pInterval->intervalUnit = ((SValueNode*)pStmt->pOptions->pInterval)->unit; pInterval->offset = NULL != pStmt->pOptions->pOffset ? ((SValueNode*)pStmt->pOptions->pOffset)->datum.i : 0; - pInterval->sliding = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval; - pInterval->slidingUnit = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit; + pInterval->sliding = + NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval; + pInterval->slidingUnit = + NULL != pStmt->pOptions->pSliding ? 
((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit; pInterval->precision = pStmt->pOptions->tsPrecision; return TSDB_CODE_SUCCESS; } -int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void ** pResRow) { +int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) { int32_t code = TSDB_CODE_SUCCESS; SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pQuery->pRoot; int64_t lastTs = 0; @@ -6053,7 +6061,7 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS toName(pCxt->pParseCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name); tNameGetFullDbName(&name, pReq->subDbName); tNameExtractFullName(&name, pReq->subStbName); - if(pStmt->pQuery != NULL) { + if (pStmt->pQuery != NULL) { code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL); } } else if ('\0' != pStmt->subDbName[0]) { @@ -6108,11 +6116,12 @@ static EDealRes checkColumnTagsInCond(SNode* pNode, void* pContext) { addTagList(&pCxt->pTags, nodesCloneNode(pNode)); } } - + return DEAL_RES_CONTINUE; } -static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, SNodeList** ppProjection) { +static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, + SNodeList** ppProjection) { SBuildTopicContext colCxt = {.colExists = false, .colNotFound = false, .pMeta = pMeta, .pTags = NULL}; nodesWalkExprPostOrder(pStmt->pWhere, checkColumnTagsInCond, &colCxt); if (colCxt.colNotFound) { @@ -6122,18 +6131,18 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* nodesDestroyList(colCxt.pTags); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Columns are forbidden in where clause"); } - if (NULL == colCxt.pTags) { // put one column to select -// for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { - SSchema* column = &pMeta->schema[0]; - SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); - if (NULL == col) { - return TSDB_CODE_OUT_OF_MEMORY; - } - strcpy(col->colName, column->name); - strcpy(col->node.aliasName, col->colName); - strcpy(col->node.userAlias, col->colName); - addTagList(&colCxt.pTags, (SNode*)col); -// } + if (NULL == colCxt.pTags) { // put one column to select + // for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { + SSchema* column = &pMeta->schema[0]; + SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == col) { + return TSDB_CODE_OUT_OF_MEMORY; + } + strcpy(col->colName, column->name); + strcpy(col->node.aliasName, col->colName); + strcpy(col->node.userAlias, col->colName); + addTagList(&colCxt.pTags, (SNode*)col); + // } } *ppProjection = colCxt.pTags; @@ -6141,13 +6150,13 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* } static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt* pStmt, SNode** pSelect) { - SParseContext* pParCxt = pCxt->pParseCxt; - SRequestConnInfo connInfo = {.pTrans = pParCxt->pTransporter, - .requestId = pParCxt->requestId, + SParseContext* pParCxt = pCxt->pParseCxt; + SRequestConnInfo connInfo = {.pTrans = pParCxt->pTransporter, + .requestId = pParCxt->requestId, .requestObjRefId = pParCxt->requestRid, .mgmtEps = pParCxt->mgmtEpSet}; - SName name; - STableMeta* pMeta = NULL; + SName name; + STableMeta* pMeta = NULL; int32_t code = getTableMetaImpl(pCxt, toName(pParCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name), &pMeta); if 
(code) { taosMemoryFree(pMeta); @@ -6156,7 +6165,7 @@ static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt if (TSDB_SUPER_TABLE != pMeta->tableType) { taosMemoryFree(pMeta); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Only supertable table can be used"); - } + } SNodeList* pProjection = NULL; code = checkCollectTopicTags(pCxt, pStmt, pMeta, &pProjection); @@ -6554,7 +6563,8 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "SUBTABLE expression must be of VARCHAR type"); } - if (NULL != pSelect->pSubtable && 0 == LIST_LENGTH(pSelect->pPartitionByList) && subtableExprHasColumnOrPseudoColumn(pSelect->pSubtable)) { + if (NULL != pSelect->pSubtable && 0 == LIST_LENGTH(pSelect->pPartitionByList) && + subtableExprHasColumnOrPseudoColumn(pSelect->pSubtable)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "SUBTABLE expression must not has column when no partition by clause"); } @@ -6910,28 +6920,28 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta if (NULL == col) { return TSDB_CODE_OUT_OF_MEMORY; } - + strcpy(col->tableAlias, pTable); strcpy(col->colName, pMeta->schema[0].name); SNodeList* pParamterList = nodesMakeList(); if (NULL == pParamterList) { - nodesDestroyNode((SNode *)col); + nodesDestroyNode((SNode*)col); return TSDB_CODE_OUT_OF_MEMORY; } - - int32_t code = nodesListStrictAppend(pParamterList, (SNode *)col); + + int32_t code = nodesListStrictAppend(pParamterList, (SNode*)col); if (code) { - nodesDestroyNode((SNode *)col); + nodesDestroyNode((SNode*)col); nodesDestroyList(pParamterList); return code; } - + SNode* pFunc = (SNode*)createFunction("last", pParamterList); if (NULL == pFunc) { nodesDestroyList(pParamterList); return TSDB_CODE_OUT_OF_MEMORY; } - + SNodeList* pProjectionList = nodesMakeList(); if (NULL == pProjectionList) { nodesDestroyList(pParamterList); @@ -6943,8 +6953,8 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta nodesDestroyList(pProjectionList); return code; } - - code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt **)pQuery); + + code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt**)pQuery); if (code) { nodesDestroyList(pProjectionList); return code; @@ -6982,14 +6992,14 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt if (TSDB_CODE_SUCCESS == code && pStmt->pOptions->fillHistory) { SRealTableNode* pTable = (SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable); code = createLastTsSelectStmt(pTable->table.dbName, pTable->table.tableName, pTable->pMeta, &pStmt->pPrevQuery); -/* - if (TSDB_CODE_SUCCESS == code) { - STranslateContext cxt = {0}; - int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); - code = translateQuery(&cxt, pStmt->pPrevQuery); - destroyTranslateContext(&cxt); - } -*/ + /* + if (TSDB_CODE_SUCCESS == code) { + STranslateContext cxt = {0}; + int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); + code = translateQuery(&cxt, pStmt->pPrevQuery); + destroyTranslateContext(&cxt); + } + */ } taosMemoryFree(pMeta); return code; @@ -7084,7 +7094,7 @@ static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow)) { return 
code; } - + SIntervalWindowNode* pWindow = (SIntervalWindowNode*)pSelect->pWindow; pInterval->interval = ((SValueNode*)pWindow->pInterval)->datum.i; pInterval->intervalUnit = ((SValueNode*)pWindow->pInterval)->unit; @@ -7092,16 +7102,16 @@ static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* pInterval->sliding = (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->datum.i : pInterval->interval); pInterval->slidingUnit = (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->unit : pInterval->intervalUnit); - pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision; + pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision; return code; } int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) { SCreateStreamStmt* pStmt = (SCreateStreamStmt*)pQuery->pRoot; - STranslateContext cxt = {0}; - SInterval interval = {0}; - int64_t lastTs = 0; + STranslateContext cxt = {0}; + SInterval interval = {0}; + int64_t lastTs = 0; int32_t code = initTranslateContext(pParseCxt, NULL, &cxt); if (TSDB_CODE_SUCCESS == code) { @@ -7136,7 +7146,6 @@ int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void return code; } - static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pStmt) { SMDropStreamReq dropReq = {0}; SName name; @@ -7261,7 +7270,7 @@ static int32_t translateGrantTagCond(STranslateContext* pCxt, SGrantStmt* pStmt, } } - int32_t code = createRealTableForGrantTable(pStmt, &pTable); + int32_t code = createRealTableForGrantTable(pStmt, &pTable); if (TSDB_CODE_SUCCESS == code) { SName name; code = getTableMetaImpl(pCxt, toName(pCxt->pParseCxt->acctId, pTable->table.dbName, pTable->table.tableName, &name), @@ -7821,7 +7830,8 @@ static SNodeList* createProjectCols(int32_t ncols, const char* const pCols[]) { return pProjections; } -static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { +static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt) { SSelectStmt* pSelect = (SSelectStmt*)nodesMakeNode(QUERY_NODE_SELECT_STMT); if (NULL == pSelect) { return TSDB_CODE_OUT_OF_MEMORY; @@ -7844,9 +7854,8 @@ static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, S return TSDB_CODE_SUCCESS; } - static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTable, int32_t numOfProjs, - const char* const pProjCol[], SSelectStmt** pStmt) { + const char* const pProjCol[], SSelectStmt** pStmt) { SNodeList* pProjectionList = NULL; if (numOfProjs >= 0) { pProjectionList = createProjectCols(numOfProjs, pProjCol); @@ -7858,13 +7867,15 @@ static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTabl return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt); } -static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { +static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt) { return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt); } static int32_t createSelectStmtForShow(ENodeType showType, SSelectStmt** pStmt) { const SSysTableShowAdapter* pShow = &sysTableShowAdapter[showType - SYSTABLE_SHOW_TYPE_OFFSET]; - return createSimpleSelectStmtFromCols(pShow->pDbName, 
pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, pStmt); + return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, + pStmt); } static int32_t createSelectStmtForShowTableDist(SShowTableDistributedStmt* pStmt, SSelectStmt** pOutput) { @@ -8002,8 +8013,8 @@ static int32_t createShowTableTagsProjections(SNodeList** pProjections, SNodeLis static int32_t rewriteShowStableTags(STranslateContext* pCxt, SQuery* pQuery) { SShowTableTagsStmt* pShow = (SShowTableTagsStmt*)pQuery->pRoot; SSelectStmt* pSelect = NULL; - int32_t code = createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal, - -1, NULL, &pSelect); + int32_t code = createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, + ((SValueNode*)pShow->pTbName)->literal, -1, NULL, &pSelect); if (TSDB_CODE_SUCCESS == code) { code = createShowTableTagsProjections(&pSelect->pProjectionList, &pShow->pTags); } From f9c897221cc66cd49bfeaa0905e802a305aece6a Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 14 Aug 2023 16:30:31 +0800 Subject: [PATCH 53/81] fix: move the setting of onlyCtbIdx to front end --- source/libs/executor/src/operator.c | 1 - source/libs/planner/src/planPhysiCreater.c | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index abef8298e5..7f0c5baa36 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -371,7 +371,6 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; - pTagScanPhyNode->onlyMetaCtbIdx = false; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 06859e195d..8efa9c1048 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -511,6 +511,20 @@ static int32_t createSimpleScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSub return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, pScan, pPhyNode); } +static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, + SPhysiNode** pPhyNode) { + STagScanPhysiNode* pScan = + (STagScanPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pScanLogicNode, QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN); + if (NULL == pScan) { + return TSDB_CODE_OUT_OF_MEMORY; + } + vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); + + pScan->onlyMetaCtbIdx = false; + + return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); +} + static int32_t createLastRowScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, SPhysiNode** pPhyNode) { SLastRowScanPhysiNode* pScan = @@ -646,6 +660,7 @@ static int32_t createScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, pCxt->hasScan = true; switch (pScanLogicNode->scanType) { case SCAN_TYPE_TAG: + return createTagScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode); case 
SCAN_TYPE_BLOCK_INFO: return createSimpleScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode); case SCAN_TYPE_TABLE_COUNT: From 8d0461e98ca13a58c4bee1b9964a1ee3920b5346 Mon Sep 17 00:00:00 2001 From: kailixu Date: Mon, 14 Aug 2023 16:31:58 +0800 Subject: [PATCH 54/81] fix: use taos_static for tmq_sim on windows --- cmake/cmake.define | 6 ++++++ tests/script/wtest.bat | 14 +++++++++++--- utils/test/c/CMakeLists.txt | 2 +- utils/tsim/CMakeLists.txt | 2 +- 4 files changed, 19 insertions(+), 5 deletions(-) diff --git a/cmake/cmake.define b/cmake/cmake.define index cf7f450994..6f4153c7d0 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -78,6 +78,12 @@ ELSE () SET(TD_TAOS_TOOLS TRUE) ENDIF () +IF (${TD_WINDOWS}) + SET(TAOS_LIB taos_static) +ELSE () + SET(TAOS_LIB taos) +ENDIF () + IF (TD_WINDOWS) MESSAGE("${Yellow} set compiler flag for Windows! ${ColourReset}") SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd") diff --git a/tests/script/wtest.bat b/tests/script/wtest.bat index b642bad285..88ae703b7c 100644 --- a/tests/script/wtest.bat +++ b/tests/script/wtest.bat @@ -17,6 +17,9 @@ rem echo SIM_DIR: %SIM_DIR% set "TSIM_DIR=%SIM_DIR%tsim\" rem echo TSIM_DIR: %TSIM_DIR% +set "DATA_DIR=%TSIM_DIR%data\" +rem echo DATA_DIR: %DATA_DIR% + set "CFG_DIR=%TSIM_DIR%cfg\" rem echo CFG_DIR: %CFG_DIR% @@ -30,25 +33,30 @@ if not exist %SIM_DIR% mkdir %SIM_DIR% if not exist %TSIM_DIR% mkdir %TSIM_DIR% if exist %CFG_DIR% rmdir /s/q %CFG_DIR% if exist %LOG_DIR% rmdir /s/q %LOG_DIR% +if exist %DATA_DIR% rmdir /s/q %DATA_DIR% if not exist %CFG_DIR% mkdir %CFG_DIR% if not exist %LOG_DIR% mkdir %LOG_DIR% +if not exist %DATA_DIR% mkdir %DATA_DIR% set "fqdn=localhost" for /f "skip=1" %%A in ( 'wmic computersystem get caption' ) do if not defined fqdn set "fqdn=%%A" -echo firstEp %fqdn% > %TAOS_CFG% +echo firstEp %fqdn%:7100 > %TAOS_CFG% +echo secondEp %fqdn%:7200 >> %TAOS_CFG% echo fqdn %fqdn% >> %TAOS_CFG% echo serverPort 7100 >> %TAOS_CFG% +echo dataDir %DATA_DIR% >> %TAOS_CFG% echo logDir %LOG_DIR% >> %TAOS_CFG% echo scriptDir %SCRIPT_DIR% >> %TAOS_CFG% echo numOfLogLines 100000000 >> %TAOS_CFG% echo rpcDebugFlag 143 >> %TAOS_CFG% echo tmrDebugFlag 131 >> %TAOS_CFG% -echo cDebugFlag 135 >> %TAOS_CFG% +echo cDebugFlag 143 >> %TAOS_CFG% echo qDebugFlag 143 >> %TAOS_CFG% -echo udebugFlag 135 >> %TAOS_CFG% +echo uDebugFlag 143 >> %TAOS_CFG% +echo debugFlag 143 >> %TAOS_CFG% echo wal 0 >> %TAOS_CFG% echo asyncLog 0 >> %TAOS_CFG% echo locale en_US.UTF-8 >> %TAOS_CFG% diff --git a/utils/test/c/CMakeLists.txt b/utils/test/c/CMakeLists.txt index 3f52fc8e5d..b96814c13b 100644 --- a/utils/test/c/CMakeLists.txt +++ b/utils/test/c/CMakeLists.txt @@ -31,7 +31,7 @@ target_link_libraries( ) target_link_libraries( tmq_sim - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os diff --git a/utils/tsim/CMakeLists.txt b/utils/tsim/CMakeLists.txt index c2cf7ac3c5..209982c659 100644 --- a/utils/tsim/CMakeLists.txt +++ b/utils/tsim/CMakeLists.txt @@ -2,7 +2,7 @@ aux_source_directory(src TSIM_SRC) add_executable(tsim ${TSIM_SRC}) target_link_libraries( tsim - PUBLIC taos_static + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os From 1d33a8d4c09e0c3eaa48b08da506df3d1611aebf Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 14 Aug 2023 10:06:33 +0000 Subject: [PATCH 55/81] rebuild index at tag0 --- source/dnode/mnode/impl/src/mndStb.c | 43 ++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c 
index 70fd74afc0..903181282a 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -18,6 +18,7 @@ #include "mndDb.h" #include "mndDnode.h" #include "mndIndex.h" +#include "mndIndexComm.h" #include "mndInfoSchema.h" #include "mndMnode.h" #include "mndPerfSchema.h" @@ -822,7 +823,7 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat return -1; } - if(pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags){ + if (pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags) { terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW; return -1; } @@ -857,11 +858,36 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea SStbObj stbObj = {0}; int32_t code = -1; + char fullIdxName[TSDB_INDEX_FNAME_LEN * 2] = {0}; + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq, "create-stb"); if (pTrans == NULL) goto _OVER; mInfo("trans:%d, used to create stb:%s", pTrans->id, pCreate->name); if (mndBuildStbFromReq(pMnode, &stbObj, pCreate, pDb) != 0) goto _OVER; + + SSchema *pSchema = &(stbObj.pTags[0]); + sprintf(fullIdxName, "%s.%s_default", pDb->name, pSchema->name); + + SSIdx idx = {0}; + if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0) { + terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; + mndReleaseIdx(pMnode, idx.pIdx); + goto _OVER; + } + + SIdxObj idxObj; + memcpy(idxObj.name, fullIdxName, TSDB_INDEX_FNAME_LEN); + memcpy(idxObj.stb, stbObj.name, TSDB_TABLE_FNAME_LEN); + memcpy(idxObj.db, stbObj.db, TSDB_DB_FNAME_LEN); + memcpy(idxObj.colName, pSchema->name, TSDB_COL_NAME_LEN); + idxObj.createdTime = taosGetTimestampMs(); + idxObj.uid = mndGenerateUid(fullIdxName, strlen(fullIdxName)); + idxObj.stbUid = stbObj.uid; + idxObj.dbUid = stbObj.dbUid; + + if (mndSetCreateIdxCommitLogs(pMnode, pTrans, &idxObj) < 0) goto _OVER; + if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) goto _OVER; if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; code = 0; @@ -956,7 +982,7 @@ static int32_t mndBuildStbFromAlter(SStbObj *pStb, SStbObj *pDst, SMCreateStbReq return -1; } - if(pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags){ + if (pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags) { terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW; return -1; } @@ -1188,7 +1214,7 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p return -1; } - if(pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ntags){ + if (pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ntags) { terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW; return -1; } @@ -1478,7 +1504,8 @@ static int32_t mndAlterStbTagBytes(SMnode *pMnode, const SStbObj *pOld, SStbObj SSchema *pTag = pNew->pTags + tag; - if (!(pTag->type == TSDB_DATA_TYPE_BINARY || pTag->type == TSDB_DATA_TYPE_NCHAR || pTag->type == TSDB_DATA_TYPE_GEOMETRY)) { + if (!(pTag->type == TSDB_DATA_TYPE_BINARY || pTag->type == TSDB_DATA_TYPE_NCHAR || + pTag->type == TSDB_DATA_TYPE_GEOMETRY)) { terrno = TSDB_CODE_MND_INVALID_STB_OPTION; return -1; } @@ -1506,7 +1533,7 @@ static int32_t mndAddSuperTableColumn(const SStbObj *pOld, SStbObj *pNew, SArray return -1; } - if(pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ncols){ + if (pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ncols) { terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW; return -1; } @@ -1598,7 +1625,8 @@ static int32_t 
mndAlterStbColumnBytes(SMnode *pMnode, const SStbObj *pOld, SStbO } SSchema *pCol = pNew->pColumns + col; - if (!(pCol->type == TSDB_DATA_TYPE_BINARY || pCol->type == TSDB_DATA_TYPE_NCHAR || pCol->type == TSDB_DATA_TYPE_GEOMETRY)) { + if (!(pCol->type == TSDB_DATA_TYPE_BINARY || pCol->type == TSDB_DATA_TYPE_NCHAR || + pCol->type == TSDB_DATA_TYPE_GEOMETRY)) { terrno = TSDB_CODE_MND_INVALID_STB_OPTION; return -1; } @@ -3182,7 +3210,6 @@ static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB SSdb *pSdb = pMnode->pSdb; SStbObj *pStb = NULL; - int32_t numOfRows = 0; if (!pShow->sysDbRsp) { numOfRows = buildSysDbColsInfo(pBlock, pShow->db, pShow->filterTb); @@ -3206,7 +3233,7 @@ static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB if (pShow->pIter == NULL) break; } else { fetch = true; - void *pKey = taosHashGetKey(pShow->pIter, NULL); + void *pKey = taosHashGetKey(pShow->pIter, NULL); pStb = sdbAcquire(pSdb, SDB_STB, pKey); if (!pStb) continue; } From 012248b68142496f0e8e2fa1833b4df4912b2c82 Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 14 Aug 2023 19:26:53 +0800 Subject: [PATCH 56/81] fix: move the only ctb idx flag to logical plan --- include/libs/nodes/plannodes.h | 1 + source/libs/nodes/src/nodesCodeFuncs.c | 10 ++++++++-- source/libs/planner/src/planOptimizer.c | 3 +++ source/libs/planner/src/planPhysiCreater.c | 2 +- 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 0830dc4918..3e24e417fc 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -107,6 +107,7 @@ typedef struct SScanLogicNode { bool sortPrimaryKey; bool igLastNull; bool groupOrderScan; + bool onlyMetaCtbIdx; // for tag scan with no tbname } SScanLogicNode; typedef struct SJoinLogicNode { diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index a2de0bc63a..4dfc55c0fa 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -660,6 +660,7 @@ static const char* jkScanLogicPlanDynamicScanFuncs = "DynamicScanFuncs"; static const char* jkScanLogicPlanDataRequired = "DataRequired"; static const char* jkScanLogicPlanTagCond = "TagCond"; static const char* jkScanLogicPlanGroupTags = "GroupTags"; +static const char* jkScanLogicPlanOnlyMetaCtbIdx = "OnlyMetaCtbIdx"; static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) { const SScanLogicNode* pNode = (const SScanLogicNode*)pObj; @@ -701,7 +702,9 @@ static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = nodeListToJson(pJson, jkScanLogicPlanGroupTags, pNode->pGroupTags); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkScanLogicPlanOnlyMetaCtbIdx, pNode->onlyMetaCtbIdx); + } return code; } @@ -746,7 +749,10 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeList(pJson, jkScanLogicPlanGroupTags, &pNode->pGroupTags); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkScanLogicPlanOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); + } + return code; } diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 16440be511..6944fc9f18 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -2679,6 +2679,9 @@ static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* 
pLogicSubp } nodesDestroyNode((SNode*)pAgg); tagScanOptCloneAncestorSlimit((SLogicNode*)pScanNode); + + pScanNode->onlyMetaCtbIdx = false; + pCxt->optimized = true; return TSDB_CODE_SUCCESS; } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 8efa9c1048..5f78b5de9c 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -520,7 +520,7 @@ static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubpla } vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); - pScan->onlyMetaCtbIdx = false; + pScan->onlyMetaCtbIdx = pScanLogicNode->onlyMetaCtbIdx; return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); } From a48d137d32c039fb68764abde491eb221e992e75 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 14 Aug 2023 11:56:47 +0000 Subject: [PATCH 57/81] rebuild index at tag0 --- source/dnode/mnode/impl/src/mndStb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 903181282a..cfac5d0a61 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -870,7 +870,7 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea sprintf(fullIdxName, "%s.%s_default", pDb->name, pSchema->name); SSIdx idx = {0}; - if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0) { + if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0 && idx.pIdx != NULL) { terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; mndReleaseIdx(pMnode, idx.pIdx); goto _OVER; From 3067417ea31eacf21f94fe57c6691f5c2a60d4d8 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 14 Aug 2023 12:01:17 +0000 Subject: [PATCH 58/81] rebuild index at tag0 --- source/dnode/mnode/impl/src/mndStb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index cfac5d0a61..03b05f8c82 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -873,6 +873,7 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0 && idx.pIdx != NULL) { terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; mndReleaseIdx(pMnode, idx.pIdx); + goto _OVER; } From 1447d1d55c036ac5f050e3072e01d06d33f5200d Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 15 Aug 2023 00:40:12 +0000 Subject: [PATCH 59/81] rebuild index at tag0 --- include/os/osRand.h | 2 ++ source/dnode/mnode/impl/src/mndStb.c | 4 +++- source/os/src/osRand.c | 17 +++++++++++++---- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/include/os/osRand.h b/include/os/osRand.h index 27d07e8c6f..5d907bba15 100644 --- a/include/os/osRand.h +++ b/include/os/osRand.h @@ -32,6 +32,8 @@ void taosSeedRand(uint32_t seed); uint32_t taosRand(void); uint32_t taosRandR(uint32_t* pSeed); void taosRandStr(char* str, int32_t size); +void taosRandStr2(char* str, int32_t size); + uint32_t taosSafeRand(void); #ifdef __cplusplus diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 03b05f8c82..ccfad404d8 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -866,8 +866,10 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea mInfo("trans:%d, used to create stb:%s", 
pTrans->id, pCreate->name); if (mndBuildStbFromReq(pMnode, &stbObj, pCreate, pDb) != 0) goto _OVER; + char randStr[16] = {0}; + taosRandStr2(randStr, tListLen(randStr) - 1); SSchema *pSchema = &(stbObj.pTags[0]); - sprintf(fullIdxName, "%s.%s_default", pDb->name, pSchema->name); + sprintf(fullIdxName, "%s.%s_%s", pDb->name, pSchema->name, randStr); SSIdx idx = {0}; if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0 && idx.pIdx != NULL) { diff --git a/source/os/src/osRand.c b/source/os/src/osRand.c index 83c36a422d..9cb6f6e52a 100644 --- a/source/os/src/osRand.c +++ b/source/os/src/osRand.c @@ -27,11 +27,11 @@ void taosSeedRand(uint32_t seed) { return srand(seed); } uint32_t taosRand(void) { #ifdef WINDOWS - unsigned int pSeed; - rand_s(&pSeed); - return pSeed; + unsigned int pSeed; + rand_s(&pSeed); + return pSeed; #else - return rand(); + return rand(); #endif } @@ -80,6 +80,15 @@ void taosRandStr(char* str, int32_t size) { const char* set = "abcdefghijklmnopqrstuvwxyz0123456789-_."; int32_t len = 39; + for (int32_t i = 0; i < size; ++i) { + str[i] = set[taosRand() % len]; + } +} + +void taosRandStr2(char* str, int32_t size) { + const char* set = "abcdefghijklmnopqrstuvwxyz0123456789"; + int32_t len = 36; + for (int32_t i = 0; i < size; ++i) { str[i] = set[taosRand() % len]; } From ef7f762c62b6bb43272a189ca16225738209f872 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 15 Aug 2023 00:41:48 +0000 Subject: [PATCH 60/81] rebuild index at tag0 --- source/dnode/mnode/impl/src/mndStb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index ccfad404d8..f80e721324 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -866,7 +866,7 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea mInfo("trans:%d, used to create stb:%s", pTrans->id, pCreate->name); if (mndBuildStbFromReq(pMnode, &stbObj, pCreate, pDb) != 0) goto _OVER; - char randStr[16] = {0}; + char randStr[24] = {0}; taosRandStr2(randStr, tListLen(randStr) - 1); SSchema *pSchema = &(stbObj.pTags[0]); sprintf(fullIdxName, "%s.%s_%s", pDb->name, pSchema->name, randStr); From 9ec64b9201a34223eaec8e46f91e9c8c00283d32 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 15 Aug 2023 02:01:13 +0000 Subject: [PATCH 61/81] rebuild index at tag0 --- source/dnode/mnode/impl/src/mndStb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index f80e721324..c6dce0d578 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -879,7 +879,7 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea goto _OVER; } - SIdxObj idxObj; + SIdxObj idxObj = {0}; memcpy(idxObj.name, fullIdxName, TSDB_INDEX_FNAME_LEN); memcpy(idxObj.stb, stbObj.name, TSDB_TABLE_FNAME_LEN); memcpy(idxObj.db, stbObj.db, TSDB_DB_FNAME_LEN); From 271ecf6beff4dcea8e83b6945406a3050dd2d793 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 15 Aug 2023 10:41:01 +0800 Subject: [PATCH 62/81] update tag index case --- tests/system-test/0-others/show_tag_index.py | 20 +++++++++++-------- tests/system-test/0-others/tag_index_basic.py | 7 +++++-- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/tests/system-test/0-others/show_tag_index.py b/tests/system-test/0-others/show_tag_index.py index 663426b7ff..d39f9eaab9 100644 --- 
a/tests/system-test/0-others/show_tag_index.py +++ b/tests/system-test/0-others/show_tag_index.py @@ -59,14 +59,18 @@ class TDTestCase: tdSql.checkData(1, 2, 2) def check_indexes(self): - tdSql.checkRows(1) - tdSql.checkCols(7) - tdSql.checkData(0, 0, 'idx1') - tdSql.checkData(0, 1, 'db') - tdSql.checkData(0, 2, 'stb') - tdSql.checkData(0, 3, None) - tdSql.checkData(0, 5, 't1') - tdSql.checkData(0, 6, 'tag_index') + tdSql.checkRows(2) + for i in range(2): + col_name = tdSql.getData(i, 5) + if col_name == "t0": + continue + tdSql.checkCols(7) + tdSql.checkData(i, 0, 'idx1') + tdSql.checkData(i, 1, 'db') + tdSql.checkData(i, 2, 'stb') + tdSql.checkData(i, 3, None) + tdSql.checkData(i, 5, 't1') + tdSql.checkData(i, 6, 'tag_index') def run(self): tdSql.execute(f'create database db') diff --git a/tests/system-test/0-others/tag_index_basic.py b/tests/system-test/0-others/tag_index_basic.py index 72ed559ffd..c1e1d521d2 100644 --- a/tests/system-test/0-others/tag_index_basic.py +++ b/tests/system-test/0-others/tag_index_basic.py @@ -118,12 +118,15 @@ class TDTestCase: def show_tagidx(self, stbname): sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="db"' tdSql.query(sql) - rows = len(self.tag_dict.keys())-1 + rows = len(self.tag_dict.keys()) tdSql.checkRows(rows) for i in range(rows): col_name = tdSql.getData(i, 1) idx_name = f'idx_{col_name}' + # skip first tag + if col_name == "t1": + continue tdSql.checkData(i, 0, idx_name) tdLog.info(f' show {rows} tag indexs ok.') @@ -201,7 +204,7 @@ class TDTestCase: # check idx result is 0 sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="db"' tdSql.query(sql) - tdSql.checkRows(0) + tdSql.checkRows(1) tdLog.info(f' drop {cnt} tag indexs ok.') # create long name idx From f7a5bef17dbd3d70365528370973618b31c2439e Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 15 Aug 2023 13:02:32 +0800 Subject: [PATCH 63/81] enh: disable stream/udf on windows --- include/util/taoserror.h | 1 + source/dnode/mgmt/mgmt_dnode/src/dmInt.c | 2 ++ source/dnode/mgmt/mgmt_qnode/src/qmInt.c | 2 ++ source/dnode/mgmt/mgmt_snode/src/smInt.c | 2 ++ source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 2 ++ source/dnode/mgmt/node_mgmt/src/dmEnv.c | 4 +++- source/libs/parser/src/parTranslater.c | 13 +++++++++++++ source/util/src/terror.c | 1 + 8 files changed, 26 insertions(+), 1 deletion(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index b43985074c..75ab916230 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -707,6 +707,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_PAR_INVALID_OPTR_USAGE TAOS_DEF_ERROR_CODE(0, 0x2667) #define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668) #define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED TAOS_DEF_ERROR_CODE(0, 0x2669) +#define TSDB_CODE_PAR_INVALID_PLATFORM TAOS_DEF_ERROR_CODE(0, 0x2670) #define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF) //planner diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c index 09783a5ea9..f59d04e618 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c @@ -59,9 +59,11 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { return -1; } +#ifdef WINDOWS if (udfStartUdfd(pMgmt->pData->dnodeId) != 0) { dError("failed to start udfd"); } +#endif pOutput->pMgmt = pMgmt; return 0; diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmInt.c b/source/dnode/mgmt/mgmt_qnode/src/qmInt.c 
index 3b425a0b49..82bc2f36f0 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmInt.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmInt.c @@ -57,11 +57,13 @@ static int32_t qmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("qnode-impl", "initialized"); +#ifdef WINDOWS if (udfcOpen() != 0) { dError("qnode can not open udfc"); qmClose(pMgmt); return -1; } +#endif if (qmStartWorker(pMgmt) != 0) { dError("failed to start qnode worker since %s", terrstr()); diff --git a/source/dnode/mgmt/mgmt_snode/src/smInt.c b/source/dnode/mgmt/mgmt_snode/src/smInt.c index e222349767..7607fcac61 100644 --- a/source/dnode/mgmt/mgmt_snode/src/smInt.c +++ b/source/dnode/mgmt/mgmt_snode/src/smInt.c @@ -65,11 +65,13 @@ int32_t smOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("snode-worker", "initialized"); +#ifdef WINDOWS if (udfcOpen() != 0) { dError("failed to open udfc in snode"); smClose(pMgmt); return -1; } +#endif pOutput->pMgmt = pMgmt; return 0; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 0ff2537e4c..872577cf28 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -571,10 +571,12 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("vnode-vnodes", "initialized"); +#ifdef WINDOWS if (udfcOpen() != 0) { dError("failed to open udfc in vnode"); goto _OVER; } +#endif code = 0; diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index a34002161d..a8f871dc96 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -198,8 +198,10 @@ void dmCleanup() { monCleanup(); syncCleanUp(); walCleanUp(); - udfcClose(); +#ifdef WINDOWS + udfcClose(); udfStopUdfd(); +#endif taosStopCacheRefreshWorker(); dmDiskClose(); dInfo("dnode env is cleaned up"); diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index a41447edf3..9c3beea2d8 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -4418,6 +4418,10 @@ static int32_t checkDbRetentionsOption(STranslateContext* pCxt, SNodeList* pRete return TSDB_CODE_SUCCESS; } +#ifdef WINDOWS + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); +#endif + if (LIST_LENGTH(pRetentions) > 3) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION, "Invalid option retentions"); } @@ -5867,6 +5871,9 @@ static int32_t checkCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pS } static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) { +#ifdef WINDOWS + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); +#endif int32_t code = checkCreateSmaIndex(pCxt, pStmt); pStmt->pReq = taosMemoryCalloc(1, sizeof(SMCreateSmaReq)); if (pStmt->pReq == NULL) code = TSDB_CODE_OUT_OF_MEMORY; @@ -7052,6 +7059,9 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* } static int32_t translateCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pStmt) { +#ifdef WINDOWS + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); +#endif SCMCreateStreamReq createReq = {0}; int32_t code = checkCreateStream(pCxt, pStmt); @@ -7201,6 +7211,9 @@ static int32_t 
readFromFile(char* pName, int32_t* len, char** buf) { } static int32_t translateCreateFunction(STranslateContext* pCxt, SCreateFunctionStmt* pStmt) { +#ifdef WINDOWS + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); +#endif if (fmIsBuiltinFunc(pStmt->funcName)) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_FUNCTION_NAME); } diff --git a/source/util/src/terror.c b/source/util/src/terror.c index b0b407e2a5..74352f2799 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -570,6 +570,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_GET_META_ERROR, "Fail to get table i TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, "Not unique table/alias") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, "System table not allowed") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, "System table not allowed") +TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platformXX") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error") //planner From a576a3b972ff69adde0b88907be7f37afadc7201 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 15 Aug 2023 06:23:27 +0000 Subject: [PATCH 64/81] rebuild index at tag0 --- tests/script/tsim/sma/drop_sma.sim | 65 +++++++++++++++++++++--------- 1 file changed, 47 insertions(+), 18 deletions(-) diff --git a/tests/script/tsim/sma/drop_sma.sim b/tests/script/tsim/sma/drop_sma.sim index 8fd8ebdcfd..fcf48f2b36 100644 --- a/tests/script/tsim/sma/drop_sma.sim +++ b/tests/script/tsim/sma/drop_sma.sim @@ -52,19 +52,35 @@ sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) in print --> show sma sql show indexes from stb from d1; -if $rows != 1 then +if $rows != 2 then return -1 endi -if $data[0][0] != sma_index_name1 then - return -1 -endi -if $data[0][1] != d1 then - return -1 -endi -if $data[0][2] != stb then - return -1 + +if $data[0][6] == tag_index then + if $data[1][0] != sma_index_name1 then + return -1 + endi + if $data[1][1] != d1 then + return -1 + endi + if $data[1][2] != stb then + return -1 + endi +else + if $data[0][0] != sma_index_name1 then + return -1 + endi + if $data[0][1] != d1 then + return -1 + endi + if $data[0][2] != stb then + return -1 + endi endi + + + print --> drop stb sql drop table stb; @@ -78,17 +94,30 @@ sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) in print --> show sma sql show indexes from stb from d1; -if $rows != 1 then +if $rows != 2 then return -1 endi -if $data[0][0] != sma_index_name1 then - return -1 -endi -if $data[0][1] != d1 then - return -1 -endi -if $data[0][2] != stb then - return -1 + +if $data[0][6] == tag_index then + if $data[1][0] != sma_index_name1 then + return -1 + endi + if $data[1][1] != d1 then + return -1 + endi + if $data[1][2] != stb then + return -1 + endi +else + if $data[0][0] != sma_index_name1 then + return -1 + endi + if $data[0][1] != d1 then + return -1 + endi + if $data[0][2] != stb then + return -1 + endi endi print --> drop stb From 27db6cfd676bc238a640adfb5471c8b6df954c8c Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 15 Aug 2023 14:25:31 +0800 Subject: [PATCH 65/81] enh: disable udf/stream for mnd on windows --- source/dnode/mnode/impl/src/mndDb.c | 6 ++++++ source/dnode/mnode/impl/src/mndFunc.c | 4 ++++ source/dnode/mnode/impl/src/mndSma.c | 4 ++++ source/dnode/mnode/impl/src/mndStream.c | 4 ++++ 4 files changed, 18 insertions(+) diff --git a/source/dnode/mnode/impl/src/mndDb.c 
b/source/dnode/mnode/impl/src/mndDb.c index 1bd629e56f..fdefe9e5b1 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -668,6 +668,12 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { } mInfo("db:%s, start to create, vgroups:%d", createReq.db, createReq.numOfVgroups); +#ifdef WINDOWS + if (taosArrayGetSize(createReq.pRetensions) > 0) { + code = TSDB_CODE_PAR_INVALID_PLATFORM; + goto _OVER; + } +#endif if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_DB, NULL) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c index 4ffc7a20c2..5f4ac830cd 100644 --- a/source/dnode/mnode/impl/src/mndFunc.c +++ b/source/dnode/mnode/impl/src/mndFunc.c @@ -361,6 +361,10 @@ static int32_t mndProcessCreateFuncReq(SRpcMsg *pReq) { } mInfo("func:%s, start to create, size:%d", createReq.name, createReq.codeLen); +#ifdef WINDOWS + code = TSDB_CODE_PAR_INVALID_PLATFORM; + goto _OVER; +#endif if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_FUNC) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index b84297f6bf..ff3f66efaf 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -655,6 +655,10 @@ _OVER: } static int32_t mndCheckCreateSmaReq(SMCreateSmaReq *pCreate) { +#ifdef WINDOWS + terrno = TSDB_CODE_PAR_INVALID_PLATFORM; + return -1; +#endif terrno = TSDB_CODE_MND_INVALID_SMA_OPTION; if (pCreate->name[0] == 0) return -1; if (pCreate->stb[0] == 0) return -1; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index a0d53ec780..d6bb8c167f 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -253,6 +253,10 @@ static void mndShowStreamTrigger(char *dst, SStreamObj *pStream) { } static int32_t mndCheckCreateStreamReq(SCMCreateStreamReq *pCreate) { +#ifdef WINDOWS + terrno = TSDB_CODE_PAR_INVALID_PLATFORM; + return -1; +#endif if (pCreate->name[0] == 0 || pCreate->sql == NULL || pCreate->sql[0] == 0 || pCreate->sourceDB[0] == 0 || pCreate->targetStbFullName[0] == 0) { terrno = TSDB_CODE_MND_INVALID_STREAM_OPTION; From 8639c22cc0cd5e684710fe4dfda644a6e0362e54 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Aug 2023 14:53:58 +0800 Subject: [PATCH 66/81] vnode, common: USE_COS def --- source/common/CMakeLists.txt | 8 ++++++++ source/dnode/vnode/CMakeLists.txt | 4 ++++ 2 files changed, 12 insertions(+) diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt index 356ea2be1c..b010467f20 100644 --- a/source/common/CMakeLists.txt +++ b/source/common/CMakeLists.txt @@ -16,6 +16,14 @@ ENDIF () IF (TD_STORAGE) ADD_DEFINITIONS(-D_STORAGE) TARGET_LINK_LIBRARIES(common PRIVATE storage) + + IF(${TD_LINUX}) + IF(${BUILD_WITH_COS}) + add_definitions(-DUSE_COS) + ENDIF(${BUILD_WITH_COS}) + + ENDIF(${TD_LINUX}) + ENDIF () target_include_directories( diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 052b6be37f..c70df86e20 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -189,6 +189,10 @@ target_include_directories( PUBLIC "$ENV{HOME}/.cos-local.1/include" ) +if(${BUILD_WITH_COS}) + add_definitions(-DUSE_COS) +endif(${BUILD_WITH_COS}) + endif(${TD_LINUX}) IF (TD_GRANT) From bd758e0269786711de840303a921b93b4e097d2c Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Aug 2023 15:14:34 +0800 
Subject: [PATCH 67/81] retention: remove old files last --- source/dnode/vnode/src/inc/vndCos.h | 2 +- source/dnode/vnode/src/tsdb/tsdbRetention.c | 23 +++++++++++---------- source/dnode/vnode/src/vnd/vnodeCos.c | 11 ++++++++-- 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h index f6db7f096e..cf2c5eb441 100644 --- a/source/dnode/vnode/src/inc/vndCos.h +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -26,7 +26,7 @@ extern int8_t tsS3Enabled; int32_t s3Init(); void s3CleanUp(); -void s3PutObjectFromFile(const char *file, const char *object); +int32_t s3PutObjectFromFile(const char *file, const char *object); void s3DeleteObjects(const char *object_name[], int nobject); bool s3Exists(const char *object_name); bool s3Get(const char *object_name, const char *path); diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c index ebe20c0e85..46a5d19a1a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRetention.c +++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c @@ -114,7 +114,8 @@ static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile TSDB_CHECK_CODE(code, lino, _exit); char *object_name = taosDirEntryBaseName(fname); - s3PutObjectFromFile(from->fname, object_name); + code = s3PutObjectFromFile(from->fname, object_name); + TSDB_CHECK_CODE(code, lino, _exit); taosCloseFile(&fdFrom); @@ -178,16 +179,6 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const int32_t lino = 0; STFileOp op = {0}; - // remove old - op = (STFileOp){ - .optype = TSDB_FOP_REMOVE, - .fid = fobj->f->fid, - .of = fobj->f[0], - }; - - code = TARRAY2_APPEND(rtner->fopArr, op); - TSDB_CHECK_CODE(code, lino, _exit); - // create new op = (STFileOp){ .optype = TSDB_FOP_CREATE, @@ -213,6 +204,16 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const code = tsdbCopyFileS3(rtner, fobj, &op.nf); TSDB_CHECK_CODE(code, lino, _exit); + // remove old + op = (STFileOp){ + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + _exit: if (code) { TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index b28b7ad747..02021831bf 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -51,7 +51,8 @@ static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) { options->ctl = cos_http_controller_create(options->pool, 0); } -void s3PutObjectFromFile(const char *file_str, const char *object_str) { +int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) { + int32_t code = 0; cos_pool_t *p = NULL; int is_cname = 0; cos_status_t *s = NULL; @@ -76,6 +77,12 @@ void s3PutObjectFromFile(const char *file_str, const char *object_str) { log_status(s); cos_pool_destroy(p); + + if (s->code != 200) { + return code = s->code; + } + + return code; } void s3DeleteObjects(const char *object_name[], int nobject) { @@ -300,7 +307,7 @@ long s3Size(const char *object_name) { int32_t s3Init() { return 0; } void s3CleanUp() {} -void s3PutObjectFromFile(const char *file, const char *object) {} +int32_t s3PutObjectFromFile(const char *file, const char *object) { return 0; } void s3DeleteObjects(const char *object_name[], int nobject) {} bool s3Exists(const char *object_name) { return false; 
} bool s3Get(const char *object_name, const char *path) { return false; } From 0f0d0953cdbed4eaeed80e114cf2de6dccbd9290 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 15 Aug 2023 07:26:53 +0000 Subject: [PATCH 68/81] rebuild index at tag0 --- source/dnode/mnode/impl/src/mndIndex.c | 4 +- tests/parallel_test/cases.task | 3 ++ tests/script/tsim/tagindex/add_index.sim | 48 +++++++++++++++++-- .../tsim/tagindex/sma_and_tag_index.sim | 22 +++++---- 4 files changed, 63 insertions(+), 14 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndIndex.c b/source/dnode/mnode/impl/src/mndIndex.c index b56ea320cc..2e78116a86 100644 --- a/source/dnode/mnode/impl/src/mndIndex.c +++ b/source/dnode/mnode/impl/src/mndIndex.c @@ -696,8 +696,8 @@ int8_t mndCheckIndexNameByTagName(SMnode *pMnode, SIdxObj *pIdxObj) { continue; } if (strncmp(pIdxObj->colName, pIdx->colName, TSDB_COL_NAME_LEN) == 0) { + sdbCancelFetch(pSdb, pIter); sdbRelease(pSdb, pIdx); - sdbCancelFetch(pSdb, pIdx); exist = 1; break; } @@ -854,8 +854,8 @@ int32_t mndDropIdxsByStb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *p if (pIdx->stbUid == pStb->uid) { if (mndSetDropIdxCommitLogs(pMnode, pTrans, pIdx) != 0) { + sdbCancelFetch(pSdb, pIter); sdbRelease(pSdb, pIdx); - sdbCancelFetch(pSdb, pIdx); return -1; } } diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index a946a7feaf..e81339d705 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1204,6 +1204,9 @@ ,,y,script,./test.sh -f tsim/tag/drop_tag.sim ,,y,script,./test.sh -f tsim/tag/tbNameIn.sim ,,y,script,./test.sh -f tmp/monitor.sim +,,y,script,./test.sh -f tsim/tagindex/add_index.sim +,,y,script,./test.sh -f tsim/tagindex/sma_and_tag_index.sim + #develop test ,,n,develop-test,python3 ./test.py -f 2-query/table_count_scan.py diff --git a/tests/script/tsim/tagindex/add_index.sim b/tests/script/tsim/tagindex/add_index.sim index cfbec90542..a6e9cae670 100644 --- a/tests/script/tsim/tagindex/add_index.sim +++ b/tests/script/tsim/tagindex/add_index.sim @@ -7,7 +7,7 @@ print ======== step0 $dbPrefix = ta_3_db $tbPrefix = ta_3_tb $mtPrefix = ta_3_mt -$tbNum = 500 +$tbNum = 50 $rowNum = 20 $totalNum = 200 @@ -48,12 +48,16 @@ while $i < $tbNum $i = $i + 1 endw - +sql_error create index ti1 on $mtPrefix (t1) sql create index ti2 on $mtPrefix (t2) sql create index ti5 on $mtPrefix (t5) print ==== test name conflict # + +sql_error create index ti1 on $mtPrefix(t1) +sql_error create index ti11 on $mtPrefix(t1) + sql_error create index ti3 on $mtPrefix(t2) sql_error create index ti2 on $mtPrefix(t2) @@ -73,6 +77,15 @@ while $i < $tbNum $i = $i + 1 endw +$i = 0 +while $i < $tbNum + sql select * from $mtPrefix where t1= $i ; + if $rows != 1 then + return -1 + endi + $i = $i + 1 +endw + print ===== test operator great equal @@ -250,7 +263,7 @@ endw print === show index sql select * from information_schema.ins_indexes -if $rows != 1 then +if $rows != 2 then return -1 endi @@ -259,12 +272,41 @@ print === drop index ti2 sql drop index ti2 print === drop not exist index + +sql select * from information_schema.ins_indexes +if $rows != 1 then + return -1 +endi + +sql drop index $data[0][0] + +if $rows != 0 then + return -1 +endi + + sql_error drop index t2 sql_error drop index t3 +sql create index ti0 on $mtPrefix (t1) + +$i = $interval +while $i < $limit + sql select * from $mtPrefix where t1 <= $i ; + + $tmp = $i - $interval + $tmp = $tmp + 1 + if $rows != $tmp then + return -1 + endi + $i = $i + 1 +endw sql_error create index ti0 on 
$mtPrefix (t1) +sql_error create index ti2 on $mtPrefix (t1) + + sql_error create index t2i on ta_3_tb17 (t2) diff --git a/tests/script/tsim/tagindex/sma_and_tag_index.sim b/tests/script/tsim/tagindex/sma_and_tag_index.sim index b15d22d439..e7e4682810 100644 --- a/tests/script/tsim/tagindex/sma_and_tag_index.sim +++ b/tests/script/tsim/tagindex/sma_and_tag_index.sim @@ -69,7 +69,7 @@ sql create sma index smat2i on $mtPrefix function(max(c1)) interval(6m,10s) slid sql select * from information_schema.ins_indexes -if $rows != 2 then +if $rows != 3 then return -1 endi @@ -84,7 +84,7 @@ while $i < 5 endw sql select * from information_schema.ins_indexes -if $rows != 6 then +if $rows != 7 then return -1 endi @@ -114,13 +114,13 @@ sql use $dbPrefix sql create table if not exists $mtPrefix (ts timestamp, c1 int) tags (t1 int, t2 int, t3 int, t4 int, t5 int) sql create index tagt2i on $mtPrefix (t2) sql select * from information_schema.ins_indexes -if $rows != 1 then +if $rows != 2 then return -1 endi sql alter table $mtPrefix drop tag t2 sql select * from information_schema.ins_indexes -if $rows != 0 then +if $rows != 1 then return -1 endi @@ -128,18 +128,22 @@ endi print ==== rename tag name, and update index colName sql create index tagt3i on $mtPrefix (t3) sql select * from information_schema.ins_indexes -if $rows != 1 then +if $rows != 2 then return -1 endi sql alter table $mtPrefix rename tag t3 txxx sql select * from information_schema.ins_indexes -if $rows != 1 then +if $rows != 2 then return -1 endi -if $data05 != txxx then - return -1 +if $data05 == txxx then + print "manual created index" +elif $data15 == txxx then + print "auto created index at tag0" +else + return -1; endi @@ -153,7 +157,7 @@ sql create table if not exists $mtPrefix (ts timestamp, c1 int) tags (t1 int, t2 sql create index tagt3i on $mtPrefix (t3) sql select * from information_schema.ins_indexes -if $rows != 2 then +if $rows != 4 then return -1 endi From c4e5cfd2fbb0d2a85d141ee57d6e03b095c31061 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Aug 2023 15:40:40 +0800 Subject: [PATCH 69/81] retention: return code from copy --- source/dnode/vnode/src/tsdb/tsdbRetention.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c index 46a5d19a1a..267e8b4117 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRetention.c +++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c @@ -179,6 +179,16 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const int32_t lino = 0; STFileOp op = {0}; + // remove old + op = (STFileOp){ + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + // create new op = (STFileOp){ .optype = TSDB_FOP_CREATE, @@ -204,16 +214,6 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const code = tsdbCopyFileS3(rtner, fobj, &op.nf); TSDB_CHECK_CODE(code, lino, _exit); - // remove old - op = (STFileOp){ - .optype = TSDB_FOP_REMOVE, - .fid = fobj->f->fid, - .of = fobj->f[0], - }; - - code = TARRAY2_APPEND(rtner->fopArr, op); - TSDB_CHECK_CODE(code, lino, _exit); - _exit: if (code) { TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); From 8a84dce6417618332e7c22a01a7df5c0421367ad Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 15 Aug 2023 16:04:30 +0800 Subject: [PATCH 70/81] enh: disable udf/stream/rsma/tsma for windows --- 
source/dnode/mnode/impl/src/mndDb.c | 3 +-- source/dnode/mnode/impl/src/mndFunc.c | 5 ++--- source/dnode/mnode/impl/src/mndSma.c | 9 ++++----- source/dnode/mnode/impl/src/mndStream.c | 9 ++++----- source/libs/parser/src/parTranslater.c | 13 ------------- 5 files changed, 11 insertions(+), 28 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index fdefe9e5b1..ada80b8370 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -666,14 +666,13 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { terrno = TSDB_CODE_INVALID_MSG; goto _OVER; } - - mInfo("db:%s, start to create, vgroups:%d", createReq.db, createReq.numOfVgroups); #ifdef WINDOWS if (taosArrayGetSize(createReq.pRetensions) > 0) { code = TSDB_CODE_PAR_INVALID_PLATFORM; goto _OVER; } #endif + mInfo("db:%s, start to create, vgroups:%d", createReq.db, createReq.numOfVgroups); if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_DB, NULL) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c index 5f4ac830cd..dc75a311e7 100644 --- a/source/dnode/mnode/impl/src/mndFunc.c +++ b/source/dnode/mnode/impl/src/mndFunc.c @@ -359,12 +359,11 @@ static int32_t mndProcessCreateFuncReq(SRpcMsg *pReq) { terrno = TSDB_CODE_INVALID_MSG; goto _OVER; } - - mInfo("func:%s, start to create, size:%d", createReq.name, createReq.codeLen); #ifdef WINDOWS - code = TSDB_CODE_PAR_INVALID_PLATFORM; + terrno = TSDB_CODE_PAR_INVALID_PLATFORM; goto _OVER; #endif + mInfo("func:%s, start to create, size:%d", createReq.name, createReq.codeLen); if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_FUNC) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index ff3f66efaf..55169a5d56 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -655,10 +655,6 @@ _OVER: } static int32_t mndCheckCreateSmaReq(SMCreateSmaReq *pCreate) { -#ifdef WINDOWS - terrno = TSDB_CODE_PAR_INVALID_PLATFORM; - return -1; -#endif terrno = TSDB_CODE_MND_INVALID_SMA_OPTION; if (pCreate->name[0] == 0) return -1; if (pCreate->stb[0] == 0) return -1; @@ -709,7 +705,10 @@ static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq) { terrno = TSDB_CODE_INVALID_MSG; goto _OVER; } - +#ifdef WINDOWS + terrno = TSDB_CODE_PAR_INVALID_PLATFORM; + goto _OVER; +#endif mInfo("sma:%s, start to create", createReq.name); if (mndCheckCreateSmaReq(&createReq) != 0) { goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index d6bb8c167f..7fd2444ab2 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -253,10 +253,6 @@ static void mndShowStreamTrigger(char *dst, SStreamObj *pStream) { } static int32_t mndCheckCreateStreamReq(SCMCreateStreamReq *pCreate) { -#ifdef WINDOWS - terrno = TSDB_CODE_PAR_INVALID_PLATFORM; - return -1; -#endif if (pCreate->name[0] == 0 || pCreate->sql == NULL || pCreate->sql[0] == 0 || pCreate->sourceDB[0] == 0 || pCreate->targetStbFullName[0] == 0) { terrno = TSDB_CODE_MND_INVALID_STREAM_OPTION; @@ -696,7 +692,10 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { terrno = TSDB_CODE_INVALID_MSG; goto _OVER; } - +#ifdef WINDOWS + terrno = TSDB_CODE_PAR_INVALID_PLATFORM; + goto _OVER; +#endif mInfo("stream:%s, start to create, sql:%s", createStreamReq.name, createStreamReq.sql); if 
(mndCheckCreateStreamReq(&createStreamReq) != 0) { diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 9c3beea2d8..a41447edf3 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -4418,10 +4418,6 @@ static int32_t checkDbRetentionsOption(STranslateContext* pCxt, SNodeList* pRete return TSDB_CODE_SUCCESS; } -#ifdef WINDOWS - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); -#endif - if (LIST_LENGTH(pRetentions) > 3) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION, "Invalid option retentions"); } @@ -5871,9 +5867,6 @@ static int32_t checkCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pS } static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) { -#ifdef WINDOWS - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); -#endif int32_t code = checkCreateSmaIndex(pCxt, pStmt); pStmt->pReq = taosMemoryCalloc(1, sizeof(SMCreateSmaReq)); if (pStmt->pReq == NULL) code = TSDB_CODE_OUT_OF_MEMORY; @@ -7059,9 +7052,6 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* } static int32_t translateCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pStmt) { -#ifdef WINDOWS - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); -#endif SCMCreateStreamReq createReq = {0}; int32_t code = checkCreateStream(pCxt, pStmt); @@ -7211,9 +7201,6 @@ static int32_t readFromFile(char* pName, int32_t* len, char** buf) { } static int32_t translateCreateFunction(STranslateContext* pCxt, SCreateFunctionStmt* pStmt) { -#ifdef WINDOWS - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); -#endif if (fmIsBuiltinFunc(pStmt->funcName)) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_FUNCTION_NAME); } From 84e472ad03b71aabd88670184d1de52c9b85dd3e Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 15 Aug 2023 16:10:54 +0800 Subject: [PATCH 71/81] enhance: tag cond col list only once and tag scan derive from scan --- include/libs/nodes/plannodes.h | 10 +---- source/libs/command/src/explain.c | 16 +++---- source/libs/executor/inc/executorInt.h | 7 +++ source/libs/executor/src/scanoperator.c | 36 +++++++-------- source/libs/nodes/src/nodesCloneFuncs.c | 9 +--- source/libs/nodes/src/nodesCodeFuncs.c | 48 ++------------------ source/libs/nodes/src/nodesMsgFuncs.c | 58 +++---------------------- 7 files changed, 41 insertions(+), 143 deletions(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 3e24e417fc..4529520ace 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -336,15 +336,7 @@ typedef struct SScanPhysiNode { } SScanPhysiNode; typedef struct STagScanPhysiNode { - // SScanPhysiNode scan; //TODO? - SPhysiNode node; - SNodeList* pScanCols; - SNodeList* pScanPseudoCols; - uint64_t uid; // unique id of the table - uint64_t suid; - int8_t tableType; - SName tableName; - bool groupOrderScan; + SScanPhysiNode scan; bool onlyMetaCtbIdx; //no tbname, tag index not used. 
} STagScanPhysiNode; diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index e917de33dd..e167b31ef8 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -291,17 +291,17 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i switch (pNode->type) { case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: { STagScanPhysiNode *pTagScanNode = (STagScanPhysiNode *)pNode; - EXPLAIN_ROW_NEW(level, EXPLAIN_TAG_SCAN_FORMAT, pTagScanNode->tableName.tname); + EXPLAIN_ROW_NEW(level, EXPLAIN_TAG_SCAN_FORMAT, pTagScanNode->scan.tableName.tname); EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); if (pResNode->pExecInfo) { QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); } - if (pTagScanNode->pScanPseudoCols) { - EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->pScanPseudoCols->length); + if (pTagScanNode->scan.pScanPseudoCols) { + EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->scan.pScanPseudoCols->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); } - EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->scan.node.pOutputDataBlockDesc->totalRowSize); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); @@ -309,11 +309,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i if (verbose) { EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, - nodesGetOutputNumFromSlotList(pTagScanNode->node.pOutputDataBlockDesc->pSlots)); + nodesGetOutputNumFromSlotList(pTagScanNode->scan.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); - EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->outputRowSize); - EXPLAIN_ROW_APPEND_LIMIT(pTagScanNode->node.pLimit); - EXPLAIN_ROW_APPEND_SLIMIT(pTagScanNode->node.pSlimit); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->scan.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pTagScanNode->scan.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pTagScanNode->scan.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index cb066d809c..dad15dc6bc 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -251,6 +251,12 @@ typedef struct STableMergeScanInfo { SSortExecInfo sortExecInfo; } STableMergeScanInfo; +typedef struct STagScanFilterContext { + SHashObj* colHash; + int32_t index; + SArray* cInfoList; +} STagScanFilterContext; + typedef struct STagScanInfo { SColumnInfo* pCols; SSDataBlock* pRes; @@ -263,6 +269,7 @@ typedef struct STagScanInfo { void* pCtbCursor; SNode* pTagCond; SNode* pTagIndexCond; + STagScanFilterContext filterCtx; SArray* aUidTags; // SArray SArray* aFilterIdxs; // SArray SStorageAPI* pStorageAPI; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 71352b1c6e..ef28875be4 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2719,12 +2719,6 @@ static int32_t tagScanCreateResultData(SDataType* pType, int32_t numOfRows, SSca return TSDB_CODE_SUCCESS; } -typedef struct 
STagScanFilterContext { - SHashObj* colHash; - int32_t index; - SArray* cInfoList; -} STagScanFilterContext; - static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) { SColumnNode* pSColumnNode = NULL; if (QUERY_NODE_COLUMN == nodeType((*pNode))) { @@ -2767,17 +2761,11 @@ static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) { } -static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode, SStorageAPI* pAPI) { +static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode, SStorageAPI* pAPI, STagScanInfo* pInfo) { int32_t code = 0; int32_t numOfTables = taosArrayGetSize(aUidTags); - STagScanFilterContext ctx = {0}; - ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); - ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); - - nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&ctx); - - SSDataBlock* pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, aUidTags, pVnode, pAPI); + SSDataBlock* pResBlock = createTagValBlockForFilter(pInfo->filterCtx.cInfoList, numOfTables, aUidTags, pVnode, pAPI); SArray* pBlockList = taosArrayInit(1, POINTER_BYTES); taosArrayPush(pBlockList, &pResBlock); @@ -2801,8 +2789,7 @@ static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aF blockDataDestroy(pResBlock); taosArrayDestroy(pBlockList); - taosHashCleanup(ctx.colHash); - taosArrayDestroy(ctx.cInfoList); + } static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo* pExprInfo, SColumnInfoData* pColInfo, int rowIndex, const SStorageAPI* pAPI, void* pVnode) { @@ -2911,7 +2898,7 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { bool ignoreFilterIdx = true; if (pInfo->pTagCond != NULL) { ignoreFilterIdx = false; - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI); + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI, pInfo); } else { ignoreFilterIdx = true; } @@ -2991,7 +2978,8 @@ static void destroyTagScanOperatorInfo(void* param) { if (pInfo->pCtbCursor != NULL) { pInfo->pStorageAPI->metaFn.closeCtbCursor(pInfo->pCtbCursor, 1); } - + taosHashCleanup(pInfo->filterCtx.colHash); + taosArrayDestroy(pInfo->filterCtx.cInfoList); taosArrayDestroy(pInfo->aFilterIdxs); taosArrayDestroyEx(pInfo->aUidTags, tagScanFreeUidTag); @@ -3001,8 +2989,9 @@ static void destroyTagScanOperatorInfo(void* param) { taosMemoryFreeClear(param); } -SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, +SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pTagScanNode, STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, SExecTaskInfo* pTaskInfo) { + SScanPhysiNode* pPhyNode = (STagScanPhysiNode*)pTagScanNode; STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -3040,11 +3029,16 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi initResultSizeInfo(&pOperator->resultInfo, 4096); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); - if (pPhyNode->onlyMetaCtbIdx) { + if (pTagScanNode->onlyMetaCtbIdx) { pInfo->aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); pInfo->aFilterIdxs = 
taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + pInfo->filterCtx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); + pInfo->filterCtx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); + if (pInfo->pTagCond != NULL) { + nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&pInfo->filterCtx); + } } - __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? doTagScanFromCtbIdx : doTagScanFromMetaEntry; + __optr_fn_t tagScanNextFn = (pTagScanNode->onlyMetaCtbIdx) ? doTagScanFromCtbIdx : doTagScanFromMetaEntry; pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, tagScanNextFn, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 965af41fa7..d3cbaac5e1 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -564,14 +564,7 @@ static int32_t physiScanCopy(const SScanPhysiNode* pSrc, SScanPhysiNode* pDst) { } static int32_t physiTagScanCopy(const STagScanPhysiNode* pSrc, STagScanPhysiNode* pDst) { - COPY_BASE_OBJECT_FIELD(node, physiNodeCopy); - CLONE_NODE_LIST_FIELD(pScanCols); - CLONE_NODE_LIST_FIELD(pScanPseudoCols); - COPY_SCALAR_FIELD(uid); - COPY_SCALAR_FIELD(suid); - COPY_SCALAR_FIELD(tableType); - COPY_OBJECT_FIELD(tableName, sizeof(SName)); - COPY_SCALAR_FIELD(groupOrderScan); + COPY_BASE_OBJECT_FIELD(scan, physiScanCopy); COPY_SCALAR_FIELD(onlyMetaCtbIdx); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 4dfc55c0fa..64a4e0e7d3 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1630,28 +1630,8 @@ static const char* jkTagScanPhysiOnlyMetaCtbIdx = "OnlyMetaCtbIdx"; static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) { const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; - int32_t code = physicPlanNodeToJson(pObj, pJson); - if (TSDB_CODE_SUCCESS == code) { - code = nodeListToJson(pJson, jkScanPhysiPlanScanCols, pNode->pScanCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = nodeListToJson(pJson, jkScanPhysiPlanScanPseudoCols, pNode->pScanPseudoCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableId, pNode->uid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanSTableId, pNode->suid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableType, pNode->tableType); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddObject(pJson, jkScanPhysiPlanTableName, nameToJson, &pNode->tableName); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddBoolToObject(pJson, jkScanPhysiPlanGroupOrderScan, pNode->groupOrderScan); - } + int32_t code = physiScanNodeToJson(pObj, pJson); + if (TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkTagScanPhysiOnlyMetaCtbIdx, pNode->onlyMetaCtbIdx); } @@ -1661,28 +1641,8 @@ static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) { static int32_t jsonToPhysiTagScanNode(const SJson* pJson, void* pObj) { STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; - int32_t code = jsonToPhysicPlanNode(pJson, pObj); - if (TSDB_CODE_SUCCESS == code) { - code = jsonToNodeList(pJson, jkScanPhysiPlanScanCols, &pNode->pScanCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = jsonToNodeList(pJson, 
jkScanPhysiPlanScanPseudoCols, &pNode->pScanPseudoCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanTableId, &pNode->uid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanSTableId, &pNode->suid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetTinyIntValue(pJson, jkScanPhysiPlanTableType, &pNode->tableType); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonToObject(pJson, jkScanPhysiPlanTableName, jsonToName, &pNode->tableName); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetBoolValue(pJson, jkScanPhysiPlanGroupOrderScan, &pNode->groupOrderScan); - } + int32_t code = jsonToPhysiScanNode(pObj, pJson); + if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkTagScanPhysiOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); } diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 4d1120861d..cade77fc17 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -2004,42 +2004,15 @@ static int32_t msgToPhysiScanNode(STlvDecoder* pDecoder, void* pObj) { } enum { - PHY_TAG_SCAN_CODE_BASE_NODE = 1, - PHY_TAG_SCAN_CODE_SCAN_COLS, - PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, - PHY_TAG_SCAN_CODE_BASE_UID, - PHY_TAG_SCAN_CODE_BASE_SUID, - PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, - PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, - PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, + PHY_TAG_SCAN_CODE_SCAN = 1, PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX }; static int32_t physiTagScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; - int32_t code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_BASE_NODE, physiNodeToMsg, &pNode->node); - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_COLS, nodeListToMsg, pNode->pScanCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, nodeListToMsg, pNode->pScanPseudoCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_UID, pNode->uid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_SUID, pNode->suid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeI8(pEncoder, PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, pNode->tableType); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, nameToMsg, &pNode->tableName); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, pNode->groupOrderScan); - } + int32_t code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN, physiScanNodeToMsg, &pNode->scan); + if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX, pNode->onlyMetaCtbIdx); } @@ -2053,29 +2026,8 @@ static int32_t msgToPhysiTagScanNode(STlvDecoder* pDecoder, void* pObj) { STlv* pTlv = NULL; tlvForEach(pDecoder, pTlv, code) { switch (pTlv->type) { - case PHY_TAG_SCAN_CODE_BASE_NODE: - code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node); - break; - case PHY_TAG_SCAN_CODE_SCAN_COLS: - code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanCols); - break; - case PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS: - code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanPseudoCols); - break; - case PHY_TAG_SCAN_CODE_BASE_UID: - code = tlvDecodeU64(pTlv, &pNode->uid); - break; - case PHY_TAG_SCAN_CODE_BASE_SUID: - code = 
tlvDecodeU64(pTlv, &pNode->suid); - break; - case PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE: - code = tlvDecodeI8(pTlv, &pNode->tableType); - break; - case PHY_TAG_SCAN_CODE_BASE_TABLE_NAME: - code = tlvDecodeObjFromTlv(pTlv, msgToName, &pNode->tableName); - break; - case PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN: - code = tlvDecodeBool(pTlv, &pNode->groupOrderScan); + case PHY_TAG_SCAN_CODE_SCAN: + code = tlvDecodeObjFromTlv(pTlv, msgToPhysiScanNode, &pNode->scan); break; case PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX: code = tlvDecodeBool(pTlv, &pNode->onlyMetaCtbIdx); From 242bf77f4c1d6e811b5b41a872eff91f3a5c0985 Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 15 Aug 2023 16:13:51 +0800 Subject: [PATCH 72/81] chore: code optimization --- include/util/taoserror.h | 2 +- source/dnode/mnode/impl/src/mndDb.c | 2 +- source/dnode/mnode/impl/src/mndFunc.c | 2 +- source/dnode/mnode/impl/src/mndSma.c | 2 +- source/dnode/mnode/impl/src/mndStream.c | 2 +- source/util/src/terror.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 75ab916230..a5081f2c7d 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -191,6 +191,7 @@ int32_t* taosGetErrno(); // #define TSDB_CODE_MND_FAILED_TO_CREATE_DIR TAOS_DEF_ERROR_CODE(0, 0x0313) // 2.x // #define TSDB_CODE_MND_FAILED_TO_INIT_STEP TAOS_DEF_ERROR_CODE(0, 0x0314) // 2.x #define TSDB_CODE_MND_USER_DISABLED TAOS_DEF_ERROR_CODE(0, 0x0315) +#define TSDB_CODE_MND_INVALID_PLATFORM TAOS_DEF_ERROR_CODE(0, 0x0316) // mnode-sdb #define TSDB_CODE_SDB_OBJ_ALREADY_THERE TAOS_DEF_ERROR_CODE(0, 0x0320) // internal @@ -707,7 +708,6 @@ int32_t* taosGetErrno(); #define TSDB_CODE_PAR_INVALID_OPTR_USAGE TAOS_DEF_ERROR_CODE(0, 0x2667) #define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668) #define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED TAOS_DEF_ERROR_CODE(0, 0x2669) -#define TSDB_CODE_PAR_INVALID_PLATFORM TAOS_DEF_ERROR_CODE(0, 0x2670) #define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF) //planner diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index ada80b8370..4f7e80c0a3 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -668,7 +668,7 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { } #ifdef WINDOWS if (taosArrayGetSize(createReq.pRetensions) > 0) { - code = TSDB_CODE_PAR_INVALID_PLATFORM; + terrno = TSDB_CODE_MND_INVALID_PLATFORM; goto _OVER; } #endif diff --git a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c index dc75a311e7..5eb7abf026 100644 --- a/source/dnode/mnode/impl/src/mndFunc.c +++ b/source/dnode/mnode/impl/src/mndFunc.c @@ -360,7 +360,7 @@ static int32_t mndProcessCreateFuncReq(SRpcMsg *pReq) { goto _OVER; } #ifdef WINDOWS - terrno = TSDB_CODE_PAR_INVALID_PLATFORM; + terrno = TSDB_CODE_MND_INVALID_PLATFORM; goto _OVER; #endif mInfo("func:%s, start to create, size:%d", createReq.name, createReq.codeLen); diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index 55169a5d56..e186a8742f 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -706,7 +706,7 @@ static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq) { goto _OVER; } #ifdef WINDOWS - terrno = TSDB_CODE_PAR_INVALID_PLATFORM; + terrno = TSDB_CODE_MND_INVALID_PLATFORM; goto _OVER; #endif mInfo("sma:%s, start to create", createReq.name); diff --git a/source/dnode/mnode/impl/src/mndStream.c 
b/source/dnode/mnode/impl/src/mndStream.c index 7fd2444ab2..427a52af3b 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -693,7 +693,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { goto _OVER; } #ifdef WINDOWS - terrno = TSDB_CODE_PAR_INVALID_PLATFORM; + terrno = TSDB_CODE_MND_INVALID_PLATFORM; goto _OVER; #endif mInfo("stream:%s, start to create, sql:%s", createStreamReq.name, createStreamReq.sql); diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 74352f2799..466b9985e7 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -151,6 +151,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SHOWOBJ, "Data expired") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_QUERY_ID, "Invalid query id") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CONN_ID, "Invalid connection id") TAOS_DEFINE_ERROR(TSDB_CODE_MND_USER_DISABLED, "User is disabled") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_PLATFORM, "Unsupported feature on this platform") // mnode-sdb TAOS_DEFINE_ERROR(TSDB_CODE_SDB_OBJ_ALREADY_THERE, "Object already there") @@ -570,7 +571,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_GET_META_ERROR, "Fail to get table i TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, "Not unique table/alias") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, "System table not allowed") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, "System table not allowed") -TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platformXX") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error") //planner From 7d1e4a9894c7c66b924a5dcbceecb708058e79f8 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 15 Aug 2023 08:25:33 +0000 Subject: [PATCH 73/81] rebuild index at tag0 --- source/common/src/tdatablock.c | 78 +++++++++++++++++----------------- tests/parallel_test/cases.task | 2 +- 2 files changed, 41 insertions(+), 39 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 5188b1e27c..9f30d04b74 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -26,7 +26,7 @@ int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRo if (pColumnInfoData->reassigned) { int32_t totalSize = 0; for (int32_t row = 0; row < numOfRows; ++row) { - char* pColData = pColumnInfoData->pData + pColumnInfoData->varmeta.offset[row]; + char* pColData = pColumnInfoData->pData + pColumnInfoData->varmeta.offset[row]; int32_t colSize = 0; if (pColumnInfoData->info.type == TSDB_DATA_TYPE_JSON) { colSize = getJsonValueLen(pColData); @@ -142,7 +142,8 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const return 0; } -int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData) { +int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, + const char* pData) { int32_t type = pColumnInfoData->info.type; if (IS_VAR_DATA_TYPE(type)) { int32_t dataLen = 0; @@ -164,7 +165,6 @@ int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, return 0; } - static int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) { if (!IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { return TSDB_CODE_SUCCESS; @@ -188,16 +188,17 @@ static int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) } static int32_t doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t currentRow, 
const char* pData, - int32_t itemLen, int32_t numOfRows, bool trimValue) { + int32_t itemLen, int32_t numOfRows, bool trimValue) { if (pColumnInfoData->info.bytes < itemLen) { - uWarn("column/tag actual data len %d is bigger than schema len %d, trim it:%d", itemLen, pColumnInfoData->info.bytes, trimValue); + uWarn("column/tag actual data len %d is bigger than schema len %d, trim it:%d", itemLen, + pColumnInfoData->info.bytes, trimValue); if (trimValue) { itemLen = pColumnInfoData->info.bytes; } else { return TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER; } } - + size_t start = 1; // the first item @@ -230,8 +231,8 @@ static int32_t doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t cur return TSDB_CODE_SUCCESS; } -int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, - uint32_t numOfRows, bool trimValue) { +int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows, + bool trimValue) { int32_t len = pColumnInfoData->info.bytes; if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { len = varDataTLen(pData); @@ -262,7 +263,7 @@ static void doBitmapMerge(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, c uint8_t* p = (uint8_t*)pSource->nullbitmap; pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] &= (0B11111111 << shiftBits); // clear remind bits - pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] |= (p[0] >> remindBits); // copy remind bits + pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] |= (p[0] >> remindBits); // copy remind bits if (BitmapLen(numOfRow1) == BitmapLen(total)) { return; @@ -350,7 +351,7 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int pColumnInfoData->pData = tmp; if (BitmapLen(numOfRow1) < BitmapLen(finalNumOfRows)) { - char* btmp = taosMemoryRealloc(pColumnInfoData->nullbitmap, BitmapLen(finalNumOfRows)); + char* btmp = taosMemoryRealloc(pColumnInfoData->nullbitmap, BitmapLen(finalNumOfRows)); if (btmp == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -622,7 +623,7 @@ int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock) { if (pCol->reassigned && IS_VAR_DATA_TYPE(pCol->info.type)) { for (int32_t row = 0; row < numOfRows; ++row) { - char* pColData = pCol->pData + pCol->varmeta.offset[row]; + char* pColData = pCol->pData + pCol->varmeta.offset[row]; int32_t colSize = 0; if (pCol->info.type == TSDB_DATA_TYPE_JSON) { colSize = getJsonValueLen(pColData); @@ -698,8 +699,7 @@ int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) { return TSDB_CODE_SUCCESS; } -static bool colDataIsNNull(const SColumnInfoData* pColumnInfoData, int32_t startIndex, - uint32_t nRows) { +static bool colDataIsNNull(const SColumnInfoData* pColumnInfoData, int32_t startIndex, uint32_t nRows) { if (!pColumnInfoData->hasNull) { return false; } @@ -880,7 +880,6 @@ int32_t dataBlockCompar(const void* p1, const void* p2, const void* param) { } static int32_t blockDataAssign(SColumnInfoData* pCols, const SSDataBlock* pDataBlock, const int32_t* index) { - size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pDst = &pCols[i]; @@ -1131,6 +1130,7 @@ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo* if (tmp == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } + // memset(tmp, 0, numOfRows * pColumn->info.bytes); // copy back the existed data if (pColumn->pData != NULL) { @@ -1474,8 +1474,8 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, 
size_t pageSize, int int end = nRows; while (start <= end) { int mid = start + (end - start) / 2; - //data size + var data type columns offset + fixed data type columns bitmap len - int midSize = rowSize * mid + numVarCols * sizeof(int32_t) * mid + numFixCols * BitmapLen(mid); + // data size + var data type columns offset + fixed data type columns bitmap len + int midSize = rowSize * mid + numVarCols * sizeof(int32_t) * mid + numFixCols * BitmapLen(mid); if (midSize > payloadSize) { result = mid; end = mid - 1; @@ -1669,7 +1669,7 @@ int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock) { if (pColData->reassigned && IS_VAR_DATA_TYPE(pColData->info.type)) { for (int32_t row = 0; row < rows; ++row) { - char* pData = pColData->pData + pColData->varmeta.offset[row]; + char* pData = pColData->pData + pColData->varmeta.offset[row]; int32_t colSize = 0; if (pColData->info.type == TSDB_DATA_TYPE_JSON) { colSize = getJsonValueLen(pData); @@ -1772,7 +1772,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { // for debug char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) { - int32_t size = 2048*1024; + int32_t size = 2048 * 1024; *pDataBuf = taosMemoryCalloc(size, 1); char* dumpBuf = *pDataBuf; char pBuf[128] = {0}; @@ -1780,8 +1780,8 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) int32_t rows = pDataBlock->info.rows; int32_t len = 0; len += snprintf(dumpBuf + len, size - len, - "===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%" PRId64 - "|rows:%" PRId64 "|version:%" PRIu64 "|cal start:%" PRIu64 "|cal end:%" PRIu64 "|tbl:%s\n", + "===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%" PRId64 "|rows:%" PRId64 + "|version:%" PRIu64 "|cal start:%" PRIu64 "|cal end:%" PRIu64 "|tbl:%s\n", flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.id.groupId, pDataBlock->info.id.uid, pDataBlock->info.rows, pDataBlock->info.version, pDataBlock->info.calWin.skey, pDataBlock->info.calWin.ekey, pDataBlock->info.parTbName); @@ -2156,21 +2156,21 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { data += metaSize; dataLen += metaSize; - if (pColRes->reassigned && IS_VAR_DATA_TYPE(pColRes->info.type)) { - colSizes[col] = 0; - for (int32_t row = 0; row < numOfRows; ++row) { - char* pColData = pColRes->pData + pColRes->varmeta.offset[row]; - int32_t colSize = 0; - if (pColRes->info.type == TSDB_DATA_TYPE_JSON) { - colSize = getJsonValueLen(pColData); - } else { - colSize = varDataTLen(pColData); - } - colSizes[col] += colSize; - dataLen += colSize; - memmove(data, pColData, colSize); - data += colSize; + if (pColRes->reassigned && IS_VAR_DATA_TYPE(pColRes->info.type)) { + colSizes[col] = 0; + for (int32_t row = 0; row < numOfRows; ++row) { + char* pColData = pColRes->pData + pColRes->varmeta.offset[row]; + int32_t colSize = 0; + if (pColRes->info.type == TSDB_DATA_TYPE_JSON) { + colSize = getJsonValueLen(pColData); + } else { + colSize = varDataTLen(pColData); } + colSizes[col] += colSize; + dataLen += colSize; + memmove(data, pColData, colSize); + data += colSize; + } } else { colSizes[col] = colDataGetLength(pColRes, numOfRows); dataLen += colSizes[col]; @@ -2181,7 +2181,8 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { } colSizes[col] = htonl(colSizes[col]); -// uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type, htonl(colSizes[col]), 
colSizes[col]); + // uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type, + // htonl(colSizes[col]), colSizes[col]); } *actualLen = dataLen; @@ -2283,7 +2284,7 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) { } void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList) { -// int32_t totalRows = pBlock->info.rows; + // int32_t totalRows = pBlock->info.rows; int32_t bmLen = BitmapLen(totalRows); char* pBitmap = NULL; int32_t maxRows = 0; @@ -2310,8 +2311,9 @@ void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList if (colDataIsNull_var(pDst, j)) { colDataSetNull_var(pDst, numOfRows); } else { - // fix address sanitizer error. p1 may point to memory that will change during realloc of colDataSetVal, first copy it to p2 - char* p1 = colDataGetVarData(pDst, j); + // fix address sanitizer error. p1 may point to memory that will change during realloc of colDataSetVal, first + // copy it to p2 + char* p1 = colDataGetVarData(pDst, j); int32_t len = 0; if (pDst->info.type == TSDB_DATA_TYPE_JSON) { len = getJsonValueLen(p1); diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index e81339d705..8345a1112e 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1205,7 +1205,7 @@ ,,y,script,./test.sh -f tsim/tag/tbNameIn.sim ,,y,script,./test.sh -f tmp/monitor.sim ,,y,script,./test.sh -f tsim/tagindex/add_index.sim -,,y,script,./test.sh -f tsim/tagindex/sma_and_tag_index.sim +,,n,script,./test.sh -f tsim/tagindex/sma_and_tag_index.sim #develop test From 1792bf306e9be53779425a59b60af7ef43242d96 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Aug 2023 16:54:26 +0800 Subject: [PATCH 74/81] vnode: fix cos cache evicting --- source/dnode/vnode/src/vnd/vnodeCos.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index 02021831bf..4c76538eb2 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -212,19 +212,20 @@ static int32_t evictFileCompareAsce(const void *pLeft, const void *pRight) { void s3EvictCache(const char *path, long object_size) { SDiskSize disk_size = {0}; - if (taosGetDiskSize((char *)path, &disk_size) < 0) { + char dir_name[TSDB_FILENAME_LEN] = "\0"; + + tstrncpy(dir_name, path, TSDB_FILENAME_LEN); + taosDirName(dir_name); + + if (taosGetDiskSize((char *)dir_name, &disk_size) < 0) { terrno = TAOS_SYSTEM_ERROR(errno); vError("failed to get disk:%s size since %s", path, terrstr()); return; } - if (object_size >= disk_size.avail + 1 << 30) { + if (object_size >= disk_size.avail - (1 << 30)) { // evict too old files // 1, list data files' atime under dir(path) - char dir_name[TSDB_FILENAME_LEN] = "\0"; - tstrncpy(dir_name, path, TSDB_FILENAME_LEN); - taosDirName(dir_name); - tdbDirPtr pDir = taosOpenDir(dir_name); if (pDir == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); @@ -236,9 +237,14 @@ void s3EvictCache(const char *path, long object_size) { char *name = taosGetDirEntryName(pDirEntry); if (!strncmp(name + strlen(name) - 5, ".data", 5)) { SEvictFile e_file = {0}; + char entry_name[TSDB_FILENAME_LEN] = "\0"; + int dir_len = strlen(dir_name); - tstrncpy(e_file.name, name, TSDB_FILENAME_LEN); - taosStatFile(name, &e_file.size, NULL, &e_file.atime); + memcpy(e_file.name, dir_name, dir_len); + e_file.name[dir_len] = '/'; + memcpy(e_file.name + dir_len + 1, 
name, strlen(name)); + + taosStatFile(e_file.name, &e_file.size, NULL, &e_file.atime); taosArrayPush(evict_files, &e_file); } From 450d7e2d3c6cdebf2dc6fbf2a279666e45efd576 Mon Sep 17 00:00:00 2001 From: slzhou Date: Tue, 15 Aug 2023 16:56:15 +0800 Subject: [PATCH 75/81] enhance: compilation error --- source/libs/nodes/src/nodesCodeFuncs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 64a4e0e7d3..48c9bf33dd 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1641,7 +1641,7 @@ static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) { static int32_t jsonToPhysiTagScanNode(const SJson* pJson, void* pObj) { STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; - int32_t code = jsonToPhysiScanNode(pObj, pJson); + int32_t code = jsonToPhysiScanNode(pJson, pObj); if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkTagScanPhysiOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); From 49e4b11547c03cfd7ed5dc993d2fa6e42bab9a8b Mon Sep 17 00:00:00 2001 From: slzhou Date: Tue, 15 Aug 2023 17:00:53 +0800 Subject: [PATCH 76/81] fix: fix compilation error --- source/libs/executor/src/scanoperator.c | 2 +- source/libs/nodes/test/nodesCloneTest.cpp | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ef28875be4..d7d97cc514 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2991,7 +2991,7 @@ static void destroyTagScanOperatorInfo(void* param) { SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pTagScanNode, STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, SExecTaskInfo* pTaskInfo) { - SScanPhysiNode* pPhyNode = (STagScanPhysiNode*)pTagScanNode; + SScanPhysiNode* pPhyNode = (SScanPhysiNode*)pTagScanNode; STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { diff --git a/source/libs/nodes/test/nodesCloneTest.cpp b/source/libs/nodes/test/nodesCloneTest.cpp index e1e99abab3..8b8893d317 100644 --- a/source/libs/nodes/test/nodesCloneTest.cpp +++ b/source/libs/nodes/test/nodesCloneTest.cpp @@ -199,9 +199,10 @@ TEST_F(NodesCloneTest, physiScan) { ASSERT_EQ(nodeType(pSrc), nodeType(pDst)); STagScanPhysiNode* pSrcNode = (STagScanPhysiNode*)pSrc; STagScanPhysiNode* pDstNode = (STagScanPhysiNode*)pDst; - ASSERT_EQ(pSrcNode->uid, pDstNode->uid); - ASSERT_EQ(pSrcNode->suid, pDstNode->suid); - ASSERT_EQ(pSrcNode->tableType, pDstNode->tableType); + ASSERT_EQ(pSrcNode->scan.uid, pDstNode->scan.uid); + ASSERT_EQ(pSrcNode->scan.suid, pDstNode->scan.suid); + ASSERT_EQ(pSrcNode->scan.tableType, pDstNode->scan.tableType); + ASSERT_EQ(pSrcNode->onlyMetaCtbIdx, pDstNode->onlyMetaCtbIdx); }); std::unique_ptr srcNode(nullptr, nodesDestroyNode); From ccce04ceb98ea22c9e6d67ced1237c10b2b0bda2 Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 15 Aug 2023 17:01:45 +0800 Subject: [PATCH 77/81] enh: disable udf on windows --- source/dnode/mgmt/mgmt_dnode/src/dmInt.c | 2 +- source/dnode/mgmt/mgmt_qnode/src/qmInt.c | 2 +- source/dnode/mgmt/mgmt_snode/src/smInt.c | 2 +- source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 2 +- source/dnode/mgmt/node_mgmt/src/dmEnv.c | 4 ++-- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git 
a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c index f59d04e618..ae62c74e03 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c @@ -59,7 +59,7 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { return -1; } -#ifdef WINDOWS +#ifndef WINDOWS if (udfStartUdfd(pMgmt->pData->dnodeId) != 0) { dError("failed to start udfd"); } diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmInt.c b/source/dnode/mgmt/mgmt_qnode/src/qmInt.c index 82bc2f36f0..657f15920a 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmInt.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmInt.c @@ -57,7 +57,7 @@ static int32_t qmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("qnode-impl", "initialized"); -#ifdef WINDOWS +#ifndef WINDOWS if (udfcOpen() != 0) { dError("qnode can not open udfc"); qmClose(pMgmt); diff --git a/source/dnode/mgmt/mgmt_snode/src/smInt.c b/source/dnode/mgmt/mgmt_snode/src/smInt.c index 7607fcac61..58d4b6139b 100644 --- a/source/dnode/mgmt/mgmt_snode/src/smInt.c +++ b/source/dnode/mgmt/mgmt_snode/src/smInt.c @@ -65,7 +65,7 @@ int32_t smOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("snode-worker", "initialized"); -#ifdef WINDOWS +#ifndef WINDOWS if (udfcOpen() != 0) { dError("failed to open udfc in snode"); smClose(pMgmt); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 872577cf28..2dd0130d56 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -571,7 +571,7 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("vnode-vnodes", "initialized"); -#ifdef WINDOWS +#ifndef WINDOWS if (udfcOpen() != 0) { dError("failed to open udfc in vnode"); goto _OVER; diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index a8f871dc96..f7e429f938 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -198,10 +198,10 @@ void dmCleanup() { monCleanup(); syncCleanUp(); walCleanUp(); -#ifdef WINDOWS +#ifndef WINDOWS udfcClose(); udfStopUdfd(); -#endif +#endif taosStopCacheRefreshWorker(); dmDiskClose(); dInfo("dnode env is cleaned up"); From 3b69736b29baff03808b1f0281728520e8be0df6 Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 15 Aug 2023 19:59:13 +0800 Subject: [PATCH 78/81] fix: set precision of interval with value from table scan node --- source/libs/executor/src/executil.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index aa0c7945b0..0f62b4f0a0 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1677,6 +1677,7 @@ SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) { .intervalUnit = pTableScanNode->intervalUnit, .slidingUnit = pTableScanNode->slidingUnit, .offset = pTableScanNode->offset, + .precision = pTableScanNode->scan.node.pOutputDataBlockDesc->precision, }; return interval; From b54d6e298250fb62459903ca8c741036e180986b Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 16 Aug 2023 09:55:30 +0800 Subject: [PATCH 79/81] chore: set default value of tsStartUdfd false on windows --- source/common/src/tglobal.c | 6 +++++- source/dnode/mgmt/mgmt_dnode/src/dmInt.c | 2 -- source/dnode/mgmt/mgmt_qnode/src/qmInt.c | 2 -- source/dnode/mgmt/mgmt_snode/src/smInt.c | 2 -- 
 source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 2 --
 source/dnode/mgmt/node_mgmt/src/dmEnv.c  | 2 --
 6 files changed, 5 insertions(+), 11 deletions(-)

diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index a772efc33c..da2917f144 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -216,7 +216,11 @@ uint32_t tsCurRange = 100;  // range
 char tsCompressor[32] = "ZSTD_COMPRESSOR";  // ZSTD_COMPRESSOR or GZIP_COMPRESSOR

 // udf
-bool tsStartUdfd = true;
+#ifdef WINDOWS
+bool tsStartUdfd = false;
+#else
+bool tsStartUdfd = true;
+#endif

 // wal
 int64_t tsWalFsyncDataSizeLimit = (100 * 1024 * 1024L);
diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c
index ae62c74e03..09783a5ea9 100644
--- a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c
+++ b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c
@@ -59,11 +59,9 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
     return -1;
   }

-#ifndef WINDOWS
   if (udfStartUdfd(pMgmt->pData->dnodeId) != 0) {
     dError("failed to start udfd");
   }
-#endif

   pOutput->pMgmt = pMgmt;
   return 0;
diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmInt.c b/source/dnode/mgmt/mgmt_qnode/src/qmInt.c
index 657f15920a..3b425a0b49 100644
--- a/source/dnode/mgmt/mgmt_qnode/src/qmInt.c
+++ b/source/dnode/mgmt/mgmt_qnode/src/qmInt.c
@@ -57,13 +57,11 @@ static int32_t qmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
   }
   tmsgReportStartup("qnode-impl", "initialized");

-#ifndef WINDOWS
   if (udfcOpen() != 0) {
     dError("qnode can not open udfc");
     qmClose(pMgmt);
     return -1;
   }
-#endif

   if (qmStartWorker(pMgmt) != 0) {
     dError("failed to start qnode worker since %s", terrstr());
diff --git a/source/dnode/mgmt/mgmt_snode/src/smInt.c b/source/dnode/mgmt/mgmt_snode/src/smInt.c
index 58d4b6139b..e222349767 100644
--- a/source/dnode/mgmt/mgmt_snode/src/smInt.c
+++ b/source/dnode/mgmt/mgmt_snode/src/smInt.c
@@ -65,13 +65,11 @@ int32_t smOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
   }
   tmsgReportStartup("snode-worker", "initialized");

-#ifndef WINDOWS
   if (udfcOpen() != 0) {
     dError("failed to open udfc in snode");
     smClose(pMgmt);
     return -1;
   }
-#endif

   pOutput->pMgmt = pMgmt;
   return 0;
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
index 2dd0130d56..0ff2537e4c 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
@@ -571,12 +571,10 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) {
   }
   tmsgReportStartup("vnode-vnodes", "initialized");

-#ifndef WINDOWS
   if (udfcOpen() != 0) {
     dError("failed to open udfc in vnode");
     goto _OVER;
   }
-#endif

   code = 0;
diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c
index f7e429f938..65683e5061 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c
@@ -198,10 +198,8 @@ void dmCleanup() {
   monCleanup();
   syncCleanUp();
   walCleanUp();
-#ifndef WINDOWS
   udfcClose();
   udfStopUdfd();
-#endif
   taosStopCacheRefreshWorker();
   dmDiskClose();
   dInfo("dnode env is cleaned up");

From 777ece27ae1c47aec575791186eff416758ecc09 Mon Sep 17 00:00:00 2001
From: kailixu
Date: Wed, 16 Aug 2023 09:56:25 +0800
Subject: [PATCH 80/81] chore: code format

---
 source/dnode/mgmt/node_mgmt/src/dmEnv.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c
index 65683e5061..a34002161d 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c
@@ -198,7 +198,7 @@ void dmCleanup() {
   monCleanup();
   syncCleanUp();
   walCleanUp();
-  udfcClose();
+  udfcClose();
   udfStopUdfd();
   taosStopCacheRefreshWorker();
   dmDiskClose();

From e587cc50e64af3ff7b3a7912da655f073a5fb7b6 Mon Sep 17 00:00:00 2001
From: wangjiaming0909 <604227650@qq.com>
Date: Thu, 10 Aug 2023 11:09:21 +0800
Subject: [PATCH 81/81] feat: optimize select agg_func partition by tag slimit

---
 include/libs/nodes/plannodes.h             |  2 +
 source/libs/executor/src/executor.c        |  9 +++-
 source/libs/nodes/src/nodesCodeFuncs.c     | 14 ++++++
 source/libs/nodes/src/nodesMsgFuncs.c      | 12 +++++
 source/libs/planner/inc/planInt.h          | 10 +++-
 source/libs/planner/src/planOptimizer.c    | 34 ++++++--------
 source/libs/planner/src/planPhysiCreater.c | 10 +++-
 source/libs/planner/src/planSpliter.c      | 10 +++-
 source/libs/planner/src/planUtil.c         | 53 ++++++++++++++++++++--
 9 files changed, 124 insertions(+), 30 deletions(-)

diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h
index 063318332a..4b6704df43 100644
--- a/include/libs/nodes/plannodes.h
+++ b/include/libs/nodes/plannodes.h
@@ -603,6 +603,8 @@ typedef struct SSubplan {
   SNode*  pTagCond;
   SNode*  pTagIndexCond;
   bool    showRewrite;
+  int32_t rowsThreshold;
+  bool    dynamicRowThreshold;
 } SSubplan;

 typedef enum EExplainMode { EXPLAIN_MODE_DISABLE = 1, EXPLAIN_MODE_STATIC, EXPLAIN_MODE_ANALYZE } EExplainMode;
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index 05767db286..b101a5916c 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -589,6 +589,10 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo
   int64_t st = taosGetTimestampUs();
   int32_t blockIndex = 0;

+  int32_t rowsThreshold = pTaskInfo->pSubplan->rowsThreshold;
+  if (!pTaskInfo->pSubplan->dynamicRowThreshold || 4096 <= pTaskInfo->pSubplan->rowsThreshold) {
+    rowsThreshold = 4096;
+  }
   while ((pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot)) != NULL) {
     SSDataBlock* p = NULL;
     if (blockIndex >= taosArrayGetSize(pTaskInfo->pResultBlockList)) {
@@ -606,10 +610,13 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo
     ASSERT(p->info.rows > 0);

     taosArrayPush(pResList, &p);
-    if (current >= 4096) {
+    if (current >= rowsThreshold) {
       break;
     }
   }
+  if (pTaskInfo->pSubplan->dynamicRowThreshold) {
+    pTaskInfo->pSubplan->rowsThreshold -= current;
+  }

   *hasMore = (pRes != NULL);
   uint64_t el = (taosGetTimestampUs() - st);
diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index f25616065e..dc53dbb230 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -2814,6 +2814,8 @@ static const char* jkSubplanDataSink = "DataSink";
 static const char* jkSubplanTagCond = "TagCond";
 static const char* jkSubplanTagIndexCond = "TagIndexCond";
 static const char* jkSubplanShowRewrite = "ShowRewrite";
+static const char* jkSubplanRowsThreshold = "RowThreshold";
+static const char* jkSubplanDynamicRowsThreshold = "DyRowThreshold";

 static int32_t subplanToJson(const void* pObj, SJson* pJson) {
   const SSubplan* pNode = (const SSubplan*)pObj;
@@ -2852,6 +2854,12 @@ static int32_t subplanToJson(const void* pObj, SJson* pJson) {
   if (TSDB_CODE_SUCCESS == code) {
     code = tjsonAddBoolToObject(pJson, jkSubplanShowRewrite, pNode->showRewrite);
   }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tjsonAddIntegerToObject(pJson, jkSubplanRowsThreshold, pNode->rowsThreshold);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tjsonAddBoolToObject(pJson, jkSubplanDynamicRowsThreshold, pNode->dynamicRowThreshold);
+  }

   return code;
 }
@@ -2893,6 +2901,12 @@ static int32_t jsonToSubplan(const SJson* pJson, void* pObj) {
   if (TSDB_CODE_SUCCESS == code) {
     code = tjsonGetBoolValue(pJson, jkSubplanShowRewrite, &pNode->showRewrite);
   }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tjsonGetIntValue(pJson, jkSubplanRowsThreshold, &pNode->rowsThreshold);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tjsonGetBoolValue(pJson, jkSubplanDynamicRowsThreshold, &pNode->dynamicRowThreshold);
+  }

   return code;
 }
diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c
index 20e829766d..37315c9f42 100644
--- a/source/libs/nodes/src/nodesMsgFuncs.c
+++ b/source/libs/nodes/src/nodesMsgFuncs.c
@@ -3538,6 +3538,12 @@ static int32_t subplanInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
   if (TSDB_CODE_SUCCESS == code) {
     code = tlvEncodeValueBool(pEncoder, pNode->showRewrite);
   }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeValueI32(pEncoder, pNode->rowsThreshold);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeValueBool(pEncoder, pNode->dynamicRowThreshold);
+  }

   return code;
 }
@@ -3587,6 +3593,12 @@ static int32_t msgToSubplanInline(STlvDecoder* pDecoder, void* pObj) {
   if (TSDB_CODE_SUCCESS == code) {
     code = tlvDecodeValueBool(pDecoder, &pNode->showRewrite);
   }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvDecodeValueI32(pDecoder, &pNode->rowsThreshold);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvDecodeValueBool(pDecoder, &pNode->dynamicRowThreshold);
+  }

   return code;
 }
diff --git a/source/libs/planner/inc/planInt.h b/source/libs/planner/inc/planInt.h
index 092fe17411..24d77cb9a4 100644
--- a/source/libs/planner/inc/planInt.h
+++ b/source/libs/planner/inc/planInt.h
@@ -43,8 +43,14 @@ int32_t splitLogicPlan(SPlanContext* pCxt, SLogicSubplan* pLogicSubplan);
 int32_t scaleOutLogicPlan(SPlanContext* pCxt, SLogicSubplan* pLogicSubplan, SQueryLogicPlan** pLogicPlan);
 int32_t createPhysiPlan(SPlanContext* pCxt, SQueryLogicPlan* pLogicPlan, SQueryPlan** pPlan, SArray* pExecNodeList);

-bool isPartTableAgg(SAggLogicNode* pAgg);
-bool isPartTableWinodw(SWindowLogicNode* pWindow);
+bool isPartTableAgg(SAggLogicNode* pAgg);
+bool isPartTagAgg(SAggLogicNode* pAgg);
+bool isPartTableWinodw(SWindowLogicNode* pWindow);
+
+#define CLONE_LIMIT 1
+#define CLONE_SLIMIT 1 << 1
+#define CLONE_LIMIT_SLIMIT (CLONE_LIMIT | CLONE_SLIMIT)
+bool cloneLimit(SLogicNode* pParent, SLogicNode* pChild, uint8_t cloneWhat);

 #ifdef __cplusplus
 }
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index 16440be511..c73da5e19d 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -368,8 +368,8 @@ static void scanPathOptSetGroupOrderScan(SScanLogicNode* pScan) {

   if (pScan->node.pParent && nodeType(pScan->node.pParent) == QUERY_NODE_LOGIC_PLAN_AGG) {
     SAggLogicNode* pAgg = (SAggLogicNode*)pScan->node.pParent;
-    bool withSlimit = pAgg->node.pSlimit != NULL || (pAgg->node.pParent && pAgg->node.pParent->pSlimit);
-    if (withSlimit && isPartTableAgg(pAgg)) {
+    bool withSlimit = pAgg->node.pSlimit != NULL;
+    if (withSlimit && (isPartTableAgg(pAgg) || isPartTagAgg(pAgg))) {
       pScan->groupOrderScan = pAgg->node.forceCreateNonBlockingOptr = true;
     }
   }
@@ -2698,39 +2698,31 @@ static void swapLimit(SLogicNode* pParent, SLogicNode* pChild) {
   pParent->pLimit = NULL;
 }

-static void cloneLimit(SLogicNode* pParent, SLogicNode* pChild) {
-  SLimitNode* pLimit = NULL;
-  if (pParent->pLimit) {
-    pChild->pLimit = nodesCloneNode(pParent->pLimit);
-    pLimit = (SLimitNode*)pChild->pLimit;
-    pLimit->limit += pLimit->offset;
-    pLimit->offset = 0;
-  }
-
-  if (pParent->pSlimit) {
-    pChild->pSlimit = nodesCloneNode(pParent->pSlimit);
-    pLimit = (SLimitNode*)pChild->pSlimit;
-    pLimit->limit += pLimit->offset;
-    pLimit->offset = 0;
-  }
-}
-
 static bool pushDownLimitHow(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPushTo);
 static bool pushDownLimitTo(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPushTo) {
   switch (nodeType(pNodeLimitPushTo)) {
     case QUERY_NODE_LOGIC_PLAN_WINDOW: {
       SWindowLogicNode* pWindow = (SWindowLogicNode*)pNodeLimitPushTo;
       if (pWindow->winType != WINDOW_TYPE_INTERVAL) break;
-      cloneLimit(pNodeWithLimit, pNodeLimitPushTo);
+      cloneLimit(pNodeWithLimit, pNodeLimitPushTo, CLONE_LIMIT_SLIMIT);
       return true;
     }
     case QUERY_NODE_LOGIC_PLAN_FILL:
     case QUERY_NODE_LOGIC_PLAN_SORT: {
-      cloneLimit(pNodeWithLimit, pNodeLimitPushTo);
+      cloneLimit(pNodeWithLimit, pNodeLimitPushTo, CLONE_LIMIT_SLIMIT);
       SNode* pChild = NULL;
       FOREACH(pChild, pNodeLimitPushTo->pChildren) { pushDownLimitHow(pNodeLimitPushTo, (SLogicNode*)pChild); }
       return true;
     }
+    case QUERY_NODE_LOGIC_PLAN_AGG: {
+      if (nodeType(pNodeWithLimit) == QUERY_NODE_LOGIC_PLAN_PROJECT &&
+          (isPartTagAgg((SAggLogicNode*)pNodeLimitPushTo) || isPartTableAgg((SAggLogicNode*)pNodeLimitPushTo))) {
+        // when part by tag, slimit will be cloned to agg, and it will be pipelined.
+        // The scan below will do scanning with group order
+        return cloneLimit(pNodeWithLimit, pNodeLimitPushTo, CLONE_SLIMIT);
+      }
+      break;
+    }
     case QUERY_NODE_LOGIC_PLAN_SCAN:
       if (nodeType(pNodeWithLimit) == QUERY_NODE_LOGIC_PLAN_PROJECT && pNodeWithLimit->pLimit) {
         swapLimit(pNodeWithLimit, pNodeLimitPushTo);
diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c
index 1b92dcd2e7..ee7aea9deb 100644
--- a/source/libs/planner/src/planPhysiCreater.c
+++ b/source/libs/planner/src/planPhysiCreater.c
@@ -872,12 +872,16 @@ static int32_t rewritePrecalcExpr(SPhysiPlanContext* pCxt, SNode* pNode, SNodeLi
 }

 static int32_t createAggPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SAggLogicNode* pAggLogicNode,
-                                  SPhysiNode** pPhyNode) {
+                                  SPhysiNode** pPhyNode, SSubplan* pSubPlan) {
   SAggPhysiNode* pAgg =
       (SAggPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pAggLogicNode, QUERY_NODE_PHYSICAL_PLAN_HASH_AGG);
   if (NULL == pAgg) {
     return TSDB_CODE_OUT_OF_MEMORY;
   }
+  if (pAgg->node.pSlimit) {
+    pSubPlan->dynamicRowThreshold = true;
+    pSubPlan->rowsThreshold = ((SLimitNode*)pAgg->node.pSlimit)->limit;
+  }

   pAgg->mergeDataBlock = (GROUP_ACTION_KEEP == pAggLogicNode->node.groupAction ? false : true);
   pAgg->groupKeyOptimized = pAggLogicNode->hasGroupKeyOptimized;
@@ -1617,7 +1621,7 @@ static int32_t doCreatePhysiNode(SPhysiPlanContext* pCxt, SLogicNode* pLogicNode
     case QUERY_NODE_LOGIC_PLAN_JOIN:
       return createJoinPhysiNode(pCxt, pChildren, (SJoinLogicNode*)pLogicNode, pPhyNode);
     case QUERY_NODE_LOGIC_PLAN_AGG:
-      return createAggPhysiNode(pCxt, pChildren, (SAggLogicNode*)pLogicNode, pPhyNode);
+      return createAggPhysiNode(pCxt, pChildren, (SAggLogicNode*)pLogicNode, pPhyNode, pSubplan);
     case QUERY_NODE_LOGIC_PLAN_PROJECT:
       return createProjectPhysiNode(pCxt, pChildren, (SProjectLogicNode*)pLogicNode, pPhyNode);
     case QUERY_NODE_LOGIC_PLAN_EXCHANGE:
@@ -1721,6 +1725,8 @@ static SSubplan* makeSubplan(SPhysiPlanContext* pCxt, SLogicSubplan* pLogicSubpl
   pSubplan->id = pLogicSubplan->id;
   pSubplan->subplanType = pLogicSubplan->subplanType;
   pSubplan->level = pLogicSubplan->level;
+  pSubplan->rowsThreshold = 4096;
+  pSubplan->dynamicRowThreshold = false;
   if (NULL != pCxt->pPlanCxt->pUser) {
     snprintf(pSubplan->user, sizeof(pSubplan->user), "%s", pCxt->pPlanCxt->pUser);
   }
diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index 3f6c73b4e5..868aee7147 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -867,8 +867,16 @@ static int32_t stbSplSplitAggNodeForPartTable(SSplitContext* pCxt, SStableSplitI
 static int32_t stbSplSplitAggNodeForCrossTable(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
   SLogicNode* pPartAgg = NULL;
   int32_t     code = stbSplCreatePartAggNode((SAggLogicNode*)pInfo->pSplitNode, &pPartAgg);
+
+
   if (TSDB_CODE_SUCCESS == code) {
-    code = stbSplCreateExchangeNode(pCxt, pInfo->pSplitNode, pPartAgg);
+    // if slimit was pushed down to agg, agg will be pipelined mode, add sort merge before parent agg
+    if ((SAggLogicNode*)pInfo->pSplitNode->pSlimit)
+      code = stbSplCreateMergeNode(pCxt, NULL, pInfo->pSplitNode, NULL, pPartAgg, true);
+    else
+      code = stbSplCreateExchangeNode(pCxt, pInfo->pSplitNode, pPartAgg);
+  } else {
+    nodesDestroyNode((SNode*)pPartAgg);
   }
   if (TSDB_CODE_SUCCESS == code) {
     code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
diff --git a/source/libs/planner/src/planUtil.c b/source/libs/planner/src/planUtil.c
index 88086cde1d..9febe102f6 100644
--- a/source/libs/planner/src/planUtil.c
+++ b/source/libs/planner/src/planUtil.c
@@ -349,7 +349,7 @@ static bool stbHasPartTbname(SNodeList* pPartKeys) {
   return false;
 }

-static SNodeList* stbSplGetPartKeys(SLogicNode* pNode) {
+static SNodeList* stbGetPartKeys(SLogicNode* pNode) {
   if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode)) {
     return ((SScanLogicNode*)pNode)->pGroupTags;
   } else if (QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode)) {
@@ -367,11 +367,58 @@ bool isPartTableAgg(SAggLogicNode* pAgg) {
     return stbHasPartTbname(pAgg->pGroupKeys) &&
            stbNotSystemScan((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0));
   }
-  return stbHasPartTbname(stbSplGetPartKeys((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0)));
+  return stbHasPartTbname(stbGetPartKeys((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0)));
+}
+
+static bool stbHasPartTag(SNodeList* pPartKeys) {
+  if (NULL == pPartKeys) {
+    return false;
+  }
+  SNode* pPartKey = NULL;
+  FOREACH(pPartKey, pPartKeys) {
+    if (QUERY_NODE_GROUPING_SET == nodeType(pPartKey)) {
+      pPartKey = nodesListGetNode(((SGroupingSetNode*)pPartKey)->pParameterList, 0);
+    }
+    if ((QUERY_NODE_FUNCTION == nodeType(pPartKey) && FUNCTION_TYPE_TAGS == ((SFunctionNode*)pPartKey)->funcType) ||
+        (QUERY_NODE_COLUMN == nodeType(pPartKey) && COLUMN_TYPE_TAG == ((SColumnNode*)pPartKey)->colType)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool isPartTagAgg(SAggLogicNode* pAgg) {
+  if (1 != LIST_LENGTH(pAgg->node.pChildren)) {
+    return false;
+  }
+  if (pAgg->pGroupKeys) {
+    return stbHasPartTag(pAgg->pGroupKeys) &&
+           stbNotSystemScan((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0));
+  }
+  return stbHasPartTag(stbGetPartKeys((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0)));
 }

 bool isPartTableWinodw(SWindowLogicNode* pWindow) {
-  return stbHasPartTbname(stbSplGetPartKeys((SLogicNode*)nodesListGetNode(pWindow->node.pChildren, 0)));
+  return stbHasPartTbname(stbGetPartKeys((SLogicNode*)nodesListGetNode(pWindow->node.pChildren, 0)));
 }
+
+bool cloneLimit(SLogicNode* pParent, SLogicNode* pChild, uint8_t cloneWhat) {
+  SLimitNode* pLimit;
+  bool        cloned = false;
+  if (pParent->pLimit && (cloneWhat & CLONE_LIMIT)) {
+    pChild->pLimit = nodesCloneNode(pParent->pLimit);
+    pLimit = (SLimitNode*)pChild->pLimit;
+    pLimit->limit += pLimit->offset;
+    pLimit->offset = 0;
+    cloned = true;
+  }
+  if (pParent->pSlimit && (cloneWhat & CLONE_SLIMIT)) {
+    pChild->pSlimit = nodesCloneNode(pParent->pSlimit);
+    pLimit = (SLimitNode*)pChild->pSlimit;
+    pLimit->limit += pLimit->offset;
+    pLimit->offset = 0;
+    cloned = true;
+  }
+  return cloned;
+}
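
Note on the executor change in PATCH 81: qExecTaskOpt previously stopped each call at a hard-coded 4096 rows; it now stops at pSubplan->rowsThreshold (capped at 4096), and when dynamicRowThreshold is set (createAggPhysiNode sets it from the pushed-down SLIMIT) it subtracts the rows already returned, so later calls fetch only what the SLIMIT still allows. The following standalone C sketch only illustrates that accounting; Subplan, fetch_block and exec_once are illustrative stand-ins, not the real SSubplan or operator API.

#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_THRESHOLD 4096

/* Stand-in for SSubplan: just the two fields added by the patch. */
typedef struct {
  int  rowsThreshold;       /* rows still wanted by the pushed-down SLIMIT */
  bool dynamicRowThreshold; /* true when an SLIMIT was pushed into the agg */
} Subplan;

/* Hypothetical data source: each block carries 50 rows. */
static int fetch_block(void) { return 50; }

/* Mirrors the accounting in qExecTaskOpt: cap one call at the threshold,
 * then shrink the remaining budget so the next call stops earlier. */
static int exec_once(Subplan *plan) {
  int threshold = plan->rowsThreshold;
  if (!plan->dynamicRowThreshold || threshold >= DEFAULT_THRESHOLD) {
    threshold = DEFAULT_THRESHOLD;
  }
  int current = 0;
  int rows;
  while ((rows = fetch_block()) > 0) {
    current += rows;
    if (current >= threshold) break;
  }
  if (plan->dynamicRowThreshold) {
    plan->rowsThreshold -= current; /* budget left for the next call */
  }
  return current;
}

int main(void) {
  Subplan plan = {.rowsThreshold = 150, .dynamicRowThreshold = true};
  printf("first call returned %d rows\n", exec_once(&plan)); /* 150 */
  printf("remaining threshold %d\n", plan.rowsThreshold);    /* 0 */
  return 0;
}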
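The reworked cloneLimit moves from planOptimizer.c to planUtil.c, takes a bit mask so callers can push down only LIMIT, only SLIMIT, or both, and reports whether anything was actually cloned. As in the old code, the clause cloned to the child has its offset folded into the limit: the child must produce limit + offset rows while the parent keeps its own clause and applies the real offset. A minimal sketch of that folding, assuming a simplified Limit struct and caller-provided storage instead of TDengine's SLimitNode and nodesCloneNode:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CLONE_LIMIT        1
#define CLONE_SLIMIT       (1 << 1)
#define CLONE_LIMIT_SLIMIT (CLONE_LIMIT | CLONE_SLIMIT)

typedef struct {
  int64_t limit;
  int64_t offset;
} Limit; /* simplified stand-in for SLimitNode */

typedef struct {
  Limit *pLimit;
  Limit *pSlimit;
} PlanNode; /* simplified stand-in for a logic plan node */

/* Child must produce limit+offset rows so the parent can still skip
 * `offset` of them; the child itself applies no offset. */
static Limit fold(const Limit *parent) {
  Limit child = {parent->limit + parent->offset, 0};
  return child;
}

static bool clone_limit(const PlanNode *parent, PlanNode *child, uint8_t what,
                        Limit *limitStorage, Limit *slimitStorage) {
  bool cloned = false;
  if (parent->pLimit && (what & CLONE_LIMIT)) {
    *limitStorage = fold(parent->pLimit);
    child->pLimit = limitStorage;
    cloned = true;
  }
  if (parent->pSlimit && (what & CLONE_SLIMIT)) {
    *slimitStorage = fold(parent->pSlimit);
    child->pSlimit = slimitStorage;
    cloned = true;
  }
  return cloned;
}

int main(void) {
  Limit    lim = {10, 5}; /* LIMIT 10 OFFSET 5 */
  PlanNode parent = {&lim, NULL}, child = {NULL, NULL};
  Limit    childLim, childSlim;
  clone_limit(&parent, &child, CLONE_LIMIT_SLIMIT, &childLim, &childSlim);
  printf("child limit=%lld offset=%lld\n", (long long)child.pLimit->limit,
         (long long)child.pLimit->offset); /* 15 / 0 */
  return 0;
}

The folding itself is unchanged from the old static cloneLimit; the patch only adds the mask and the boolean return so pushDownLimitTo can tell whether a clause was really pushed.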
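The new QUERY_NODE_LOGIC_PLAN_AGG case in pushDownLimitTo is deliberately narrower than the window and sort cases: only the SLIMIT is cloned, and only when the limit-bearing node is a projection over an aggregate partitioned by tbname or by tags, since then, per the comment in the patch, the agg can run pipelined and the scan below emits groups in order. The schematic decision function below restates that rule under simplified assumptions; the enum values, LogicNode fields and decide_push are illustrative, not the planner's real API.

#include <stdbool.h>
#include <stdio.h>

typedef enum { NODE_PROJECT, NODE_AGG, NODE_SORT, NODE_SCAN } NodeType;

typedef struct {
  NodeType type;
  bool     partitionedByTbname; /* PARTITION BY tbname */
  bool     partitionedByTag;    /* PARTITION BY <tag column> or tags(...) */
} LogicNode;

typedef enum { PUSH_NOTHING, PUSH_SLIMIT_ONLY, PUSH_LIMIT_AND_SLIMIT } PushKind;

/* Schematic version of the new AGG branch: the full LIMIT stays on the
 * projection, only the SLIMIT moves down so the agg can stop once enough
 * groups have been produced. */
static PushKind decide_push(const LogicNode *withLimit, const LogicNode *target) {
  if (target->type == NODE_AGG && withLimit->type == NODE_PROJECT &&
      (target->partitionedByTbname || target->partitionedByTag)) {
    return PUSH_SLIMIT_ONLY;
  }
  if (target->type == NODE_SORT) {
    return PUSH_LIMIT_AND_SLIMIT;
  }
  return PUSH_NOTHING;
}

int main(void) {
  LogicNode project = {NODE_PROJECT, false, false};
  LogicNode agg = {NODE_AGG, false, true}; /* aggregate partitioned by a tag */
  printf("decision: %d\n", (int)decide_push(&project, &agg)); /* 1 = SLIMIT only */
  return 0;
}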