From 67991804a6ab97c170513689eabb7278c4ed215a Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 23 Jul 2023 18:14:07 +0800 Subject: [PATCH 001/147] test: add build --- cmake/cmake.define | 1 - 1 file changed, 1 deletion(-) diff --git a/cmake/cmake.define b/cmake/cmake.define index cf7f450994..c72d48d7f3 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -1,5 +1,4 @@ cmake_minimum_required(VERSION 3.0) - set(CMAKE_VERBOSE_MAKEFILE ON) set(TD_BUILD_TAOSA_INTERNAL FALSE) From 41aa6a0c671e4210aff4d4a9852817df069704dc Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Sun, 23 Jul 2023 18:17:18 +0800 Subject: [PATCH 002/147] test: test build taos-tools --- source/libs/wal/src/walMeta.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 3d457c9b5f..0fe7e3a1a0 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -20,6 +20,7 @@ #include "tutil.h" #include "walInt.h" + bool FORCE_INLINE walLogExist(SWal* pWal, int64_t ver) { return !walIsEmpty(pWal) && walGetFirstVer(pWal) <= ver && walGetLastVer(pWal) >= ver; } From cddf27e8b4d9183b321d83ea2df1ef5561a294bd Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 31 Jul 2023 18:48:50 +0800 Subject: [PATCH 003/147] fix:add committed & seek process logic --- source/client/test/clientTests.cpp | 3 ++- source/dnode/mnode/impl/src/mndSubscribe.c | 1 + source/dnode/vnode/src/tq/tq.c | 6 +++--- source/dnode/vnode/src/vnd/vnodeSvr.c | 5 +++++ 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp index d88a26cbb2..6f978b0143 100644 --- a/source/client/test/clientTests.cpp +++ b/source/client/test/clientTests.cpp @@ -1251,7 +1251,8 @@ TEST(clientCase, td_25129) { } for(int i = 0; i < numOfAssign; i++){ - printf("assign i:%d, vgId:%d, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, pAssign[i].currentOffset, 
pAssign[i].begin, pAssign[i].end); + int64_t committed = tmq_committed(tmq, topicName, pAssign[i].vgId); + printf("assign i:%d, vgId:%d, committed:%lld, offset:%lld, start:%lld, end:%lld\n", i, pAssign[i].vgId, committed, pAssign[i].currentOffset, pAssign[i].begin, pAssign[i].end); } while (1) { diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 6bd23c3b90..85054e5cd7 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -692,6 +692,7 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) { taosArrayDestroy(rebOutput.modifyConsumers); taosArrayDestroy(rebOutput.rebVgs); + taosHashCancelIterate(pReq->rebSubHash, pIter); terrno = TSDB_CODE_OUT_OF_MEMORY; mInfo("mq re-balance failed, due to out of memory"); taosHashCleanup(pReq->rebSubHash); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 4d4383473d..2999944dd0 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -336,10 +336,10 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t sversion, char* msg, int32_t STqOffset* pOffset = &vgOffset.offset; if (pOffset->val.type == TMQ_OFFSET__SNAPSHOT_DATA || pOffset->val.type == TMQ_OFFSET__SNAPSHOT_META) { - tqInfo("receive offset commit msg to %s on vgId:%d, offset(type:snapshot) uid:%" PRId64 ", ts:%" PRId64, + tqDebug("receive offset commit msg to %s on vgId:%d, offset(type:snapshot) uid:%" PRId64 ", ts:%" PRId64, pOffset->subKey, vgId, pOffset->val.uid, pOffset->val.ts); } else if (pOffset->val.type == TMQ_OFFSET__LOG) { - tqInfo("receive offset commit msg to %s on vgId:%d, offset(type:log) version:%" PRId64, pOffset->subKey, vgId, + tqDebug("receive offset commit msg to %s on vgId:%d, offset(type:log) version:%" PRId64, pOffset->subKey, vgId, pOffset->val.version); } else { tqError("invalid commit offset type:%d", pOffset->val.type); @@ -367,12 +367,12 @@ int32_t tqProcessSeekReq(STQ* pTq, 
SRpcMsg* pMsg) { SRpcMsg rsp = {.info = pMsg->info}; int code = 0; - tqDebug("tmq seek: consumer:0x%" PRIx64 " vgId:%d, subkey %s", req.consumerId, vgId, req.subKey); if (tDeserializeSMqSeekReq(pMsg->pCont, pMsg->contLen, &req) < 0) { code = TSDB_CODE_OUT_OF_MEMORY; goto end; } + tqDebug("tmq seek: consumer:0x%" PRIx64 " vgId:%d, subkey %s", req.consumerId, vgId, req.subKey); STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey)); if (pHandle == NULL) { tqWarn("tmq seek: consumer:0x%" PRIx64 " vgId:%d subkey %s not found", req.consumerId, vgId, req.subKey); diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 81f87a3e22..743470aac8 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -624,6 +624,11 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { // return tqProcessPollReq(pVnode->pTq, pMsg); case TDMT_VND_TMQ_VG_WALINFO: return tqProcessVgWalInfoReq(pVnode->pTq, pMsg); + case TDMT_VND_TMQ_VG_COMMITTEDINFO: + return tqProcessVgCommittedInfoReq(pVnode->pTq, pMsg); + case TDMT_VND_TMQ_SEEK: + return tqProcessSeekReq(pVnode->pTq, pMsg); + default: vError("unknown msg type:%d in fetch queue", pMsg->msgType); return TSDB_CODE_APP_ERROR; From 8a0e9ff0e0722e9578a1cc53933664c0dea6fb64 Mon Sep 17 00:00:00 2001 From: dmchen Date: Tue, 1 Aug 2023 14:24:28 +0800 Subject: [PATCH 004/147] fix/retry_resolve_fqdn --- include/common/tglobal.h | 1 + source/common/src/tglobal.c | 3 +++ source/libs/sync/src/syncUtil.c | 18 +++++++++++++++++- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 8ea0a857e8..81f2af98f5 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -198,6 +198,7 @@ extern bool tsFilterScalarMode; extern int32_t tsKeepTimeOffset; extern int32_t tsMaxStreamBackendCache; extern int32_t tsPQSortMemThreshold; +extern int32_t 
tsResolveFQDNRetryTime; // #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 0546ed7f47..56202899cd 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -234,6 +234,7 @@ int64_t tsStreamBufferSize = 128 * 1024 * 1024; int64_t tsCheckpointInterval = 3 * 60 * 60 * 1000; bool tsFilterScalarMode = false; int32_t tsKeepTimeOffset = 0; // latency of data migration +int tsResolveFQDNRetryTime = 100; //seconds #ifndef _STORAGE int32_t taosSetTfsCfg(SConfig *pCfg) { @@ -559,6 +560,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "keepTimeOffset", tsKeepTimeOffset, 0, 23, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "resolveFQDNRetryTime", tsResolveFQDNRetryTime, 1, 10240, 0) != 0) return -1; GRANT_CFG_ADD; return 0; @@ -947,6 +949,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsKeepTimeOffset = cfgGetItem(pCfg, "keepTimeOffset")->i32; tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32; tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32; + tsResolveFQDNRetryTime = cfgGetItem(pCfg, "resolveFQDNRetryTime")->i32; GRANT_CFG_GET; return 0; diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index ae1c775a18..9acc17e130 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -21,6 +21,7 @@ #include "syncRaftCfg.h" #include "syncRaftStore.h" #include "syncSnapshot.h" +#include "tglobal.h" void syncCfg2SimpleStr(const SSyncCfg* pCfg, char* buf, int32_t bufLen) { int32_t len = snprintf(buf, bufLen, "{num:%d, as:%d, [", pCfg->replicaNum, pCfg->myIndex); @@ -41,7 +42,22 @@ void 
syncUtilNodeInfo2EpSet(const SNodeInfo* pInfo, SEpSet* pEpSet) { } bool syncUtilNodeInfo2RaftId(const SNodeInfo* pInfo, SyncGroupId vgId, SRaftId* raftId) { - uint32_t ipv4 = taosGetIpv4FromFqdn(pInfo->nodeFqdn); + uint32_t ipv4 = 0xFFFFFFFF; + sDebug("vgId:%d, start to resolve sync addr fqdn in %d seconds, " + "dnode:%d cluster:%" PRId64 " fqdn:%s port:%u ", + vgId, tsResolveFQDNRetryTime, + pInfo->nodeId, pInfo->clusterId, pInfo->nodeFqdn, pInfo->nodePort); + for(int i = 0; i < tsResolveFQDNRetryTime; i++){ + ipv4 = taosGetIpv4FromFqdn(pInfo->nodeFqdn); + if (ipv4 == 0xFFFFFFFF || ipv4 == 1) { + sError("failed to resolve ipv4 addr, fqdn:%s, wait one second", pInfo->nodeFqdn); + taosSsleep(1); + } + else{ + break; + } + } + if (ipv4 == 0xFFFFFFFF || ipv4 == 1) { sError("failed to resolve ipv4 addr, fqdn:%s", pInfo->nodeFqdn); terrno = TSDB_CODE_TSC_INVALID_FQDN; From 88d755be76d3b75bcebf9b114363ce0b5b4dcae1 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 4 Aug 2023 13:31:00 +0800 Subject: [PATCH 005/147] feat(tsdb/cos): s3 migration --- cmake/cmake.options | 6 + cmake/cos_CMakeLists.txt.in | 12 + contrib/CMakeLists.txt | 23 + contrib/test/CMakeLists.txt | 5 + contrib/test/cos/CMakeLists.txt | 49 + contrib/test/cos/main.c | 3090 +++++++++++++++++++ source/common/src/tglobal.c | 68 +- source/dnode/vnode/CMakeLists.txt | 28 +- source/dnode/vnode/src/inc/vndCos.h | 36 + source/dnode/vnode/src/tsdb/tsdbRetention.c | 120 +- source/dnode/vnode/src/vnd/vnodeCos.c | 114 + source/dnode/vnode/src/vnd/vnodeModule.c | 5 + 12 files changed, 3530 insertions(+), 26 deletions(-) create mode 100644 cmake/cos_CMakeLists.txt.in create mode 100644 contrib/test/cos/CMakeLists.txt create mode 100644 contrib/test/cos/main.c create mode 100644 source/dnode/vnode/src/inc/vndCos.h create mode 100644 source/dnode/vnode/src/vnd/vnodeCos.c diff --git a/cmake/cmake.options b/cmake/cmake.options index fa0b888415..ea5efcb13a 100644 --- a/cmake/cmake.options +++ b/cmake/cmake.options @@ 
-125,6 +125,12 @@ option( ON ) +option( + BUILD_WITH_COS + "If build with cos" + ON +) + option( BUILD_WITH_SQLITE "If build with sqlite" diff --git a/cmake/cos_CMakeLists.txt.in b/cmake/cos_CMakeLists.txt.in new file mode 100644 index 0000000000..ee1e58b50f --- /dev/null +++ b/cmake/cos_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# cos +ExternalProject_Add(cos + GIT_REPOSITORY https://github.com/tencentyun/cos-c-sdk-v5.git + GIT_TAG v5.0.16 + SOURCE_DIR "${TD_CONTRIB_DIR}/cos-c-sdk-v5" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index c60fd33b16..df9519d00f 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -122,6 +122,12 @@ if(${BUILD_WITH_SQLITE}) cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif(${BUILD_WITH_SQLITE}) +# cos +if(${BUILD_WITH_COS}) + cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + add_definitions(-DUSE_COS) +endif(${BUILD_WITH_COS}) + # lucene if(${BUILD_WITH_LUCENE}) cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -347,6 +353,23 @@ if (${BUILD_WITH_ROCKSDB}) endif() endif() +# cos +if(${BUILD_WITH_COS}) + option(ENABLE_TEST "Enable the tests" OFF) + + set(CMAKE_BUILD_TYPE debug) + set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) + set(CMAKE_PROJECT_NAME cos_c_sdk) + + add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL) + target_include_directories( + cos_c_sdk + PUBLIC $ + ) + + set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME}) +endif(${BUILD_WITH_COS}) + # lucene # To support build on ubuntu: sudo apt-get install libboost-all-dev if(${BUILD_WITH_LUCENE}) diff --git a/contrib/test/CMakeLists.txt b/contrib/test/CMakeLists.txt index f35cf0d13d..1deff5a67e 100644 --- a/contrib/test/CMakeLists.txt +++ b/contrib/test/CMakeLists.txt @@ -3,6 +3,11 @@ if(${BUILD_WITH_ROCKSDB}) add_subdirectory(rocksdb) endif(${BUILD_WITH_ROCKSDB}) +# 
cos +if(${BUILD_WITH_COS}) + add_subdirectory(cos) +endif(${BUILD_WITH_COS}) + if(${BUILD_WITH_LUCENE}) add_subdirectory(lucene) endif(${BUILD_WITH_LUCENE}) diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt new file mode 100644 index 0000000000..77c57e5a65 --- /dev/null +++ b/contrib/test/cos/CMakeLists.txt @@ -0,0 +1,49 @@ +add_executable(cosTest "") +target_sources(cosTest + PRIVATE + "${CMAKE_CURRENT_SOURCE_DIR}/main.c" + ) + +#find_path(APR_INCLUDE_DIR apr-1/apr_time.h) +#find_path(APR_UTIL_INCLUDE_DIR apr/include/apr-1/apr_md5.h) +#find_path(MINIXML_INCLUDE_DIR mxml.h) +#find_path(CURL_INCLUDE_DIR curl/curl.h) + +#include_directories (${MINIXML_INCLUDE_DIR}) +#include_directories (${CURL_INCLUDE_DIR}) +FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) +#FIND_PROGRAM(APU_CONFIG_BIN NAMES apu-config apu-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) + +IF (APR_CONFIG_BIN) + EXECUTE_PROCESS( + COMMAND ${APR_CONFIG_BIN} --includedir + OUTPUT_VARIABLE APR_INCLUDE_DIR + OUTPUT_STRIP_TRAILING_WHITESPACE + ) +ENDIF() +#IF (APU_CONFIG_BIN) +# EXECUTE_PROCESS( +# COMMAND ${APU_CONFIG_BIN} --includedir +# OUTPUT_VARIABLE APR_UTIL_INCLUDE_DIR +# OUTPUT_STRIP_TRAILING_WHITESPACE +# ) +#ENDIF() + +include_directories (${APR_INCLUDE_DIR}) +#include_directories (${APR_UTIL_INCLUDE_DIR}) + +target_include_directories( + cosTest + PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" + ) + +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) + +target_link_libraries(cosTest cos_c_sdk) +target_link_libraries(cosTest ${APR_UTIL_LIBRARY}) +target_link_libraries(cosTest ${APR_LIBRARY}) +target_link_libraries(cosTest ${MINIXML_LIBRARY}) +target_link_libraries(cosTest ${CURL_LIBRARY}) diff --git a/contrib/test/cos/main.c 
b/contrib/test/cos/main.c new file mode 100644 index 0000000000..faaceee2e3 --- /dev/null +++ b/contrib/test/cos/main.c @@ -0,0 +1,3090 @@ +#include +#include +#include +#include + +#include "cos_api.h" +#include "cos_http_io.h" +#include "cos_log.h" + +// endpoint 是 COS 访问域名信息,详情请参见 https://cloud.tencent.com/document/product/436/6224 文档 +// static char TEST_COS_ENDPOINT[] = "cos.ap-guangzhou.myqcloud.com"; +// static char TEST_COS_ENDPOINT[] = "http://oss-cn-beijing.aliyuncs.com"; +static char TEST_COS_ENDPOINT[] = "http://cos.ap-beijing.myqcloud.com"; +// 数据万象的访问域名,详情请参见 https://cloud.tencent.com/document/product/460/31066 文档 +static char TEST_CI_ENDPOINT[] = "https://ci.ap-guangzhou.myqcloud.com"; +// 开发者拥有的项目身份ID/密钥,可在 https://console.cloud.tencent.com/cam/capi 页面获取 +static char *TEST_ACCESS_KEY_ID; // your secret_id +static char *TEST_ACCESS_KEY_SECRET; // your secret_key +// 开发者访问 COS 服务时拥有的用户维度唯一资源标识,用以标识资源,可在 https://console.cloud.tencent.com/cam/capi +// 页面获取 +// static char TEST_APPID[] = ""; // your appid +// static char TEST_APPID[] = "119"; // your appid +static char TEST_APPID[] = "1309024725"; // your appid +// the cos bucket name, syntax: [bucket]-[appid], for example: mybucket-1253666666,可在 +// https://console.cloud.tencent.com/cos5/bucket 查看 static char TEST_BUCKET_NAME[] = ""; +// static char TEST_BUCKET_NAME[] = ""; +// static char TEST_BUCKET_NAME[] = "test-bucket-119"; +static char TEST_BUCKET_NAME[] = "test0711-1309024725"; +// 对象拥有者,比如用户UIN:100000000001 +static char TEST_UIN[] = ""; // your uin +// 地域信息,枚举值可参见 https://cloud.tencent.com/document/product/436/6224 +// 文档,例如:ap-beijing、ap-hongkong、eu-frankfurt 等 +static char TEST_REGION[] = "ap-guangzhou"; // region in endpoint +// 对象键,对象(Object)在存储桶(Bucket)中的唯一标识。有关对象与对象键的进一步说明,请参见 +// https://cloud.tencent.com/document/product/436/13324 文档 +static char TEST_OBJECT_NAME1[] = "1.txt"; +static char TEST_OBJECT_NAME2[] = "test2.dat"; +static char TEST_OBJECT_NAME3[] = "test3.dat"; +static char 
TEST_OBJECT_NAME4[] = "multipart.txt"; +// static char TEST_DOWNLOAD_NAME2[] = "download_test2.dat"; +static char *TEST_APPEND_NAMES[] = {"test.7z.001", "test.7z.002"}; +static char TEST_DOWNLOAD_NAME3[] = "download_test3.dat"; +static char TEST_MULTIPART_OBJECT[] = "multipart.dat"; +static char TEST_DOWNLOAD_NAME4[] = "multipart_download.dat"; +static char TEST_MULTIPART_FILE[] = "test.zip"; +// static char TEST_MULTIPART_OBJECT2[] = "multipart2.dat"; +static char TEST_MULTIPART_OBJECT3[] = "multipart3.dat"; +static char TEST_MULTIPART_OBJECT4[] = "multipart4.dat"; + +static void print_headers(cos_table_t *headers) { + const cos_array_header_t *tarr; + const cos_table_entry_t *telts; + int i = 0; + + if (apr_is_empty_table(headers)) { + return; + } + + tarr = cos_table_elts(headers); + telts = (cos_table_entry_t *)tarr->elts; + + printf("headers:\n"); + for (; i < tarr->nelts; i++) { + telts = (cos_table_entry_t *)(tarr->elts + i * tarr->elt_size); + printf("%s: %s\n", telts->key, telts->val); + } +} + +void init_test_config(cos_config_t *config, int is_cname) { + cos_str_set(&config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&config->appid, TEST_APPID); + config->is_cname = is_cname; +} + +void init_test_request_options(cos_request_options_t *options, int is_cname) { + options->config = cos_config_create(options->pool); + init_test_config(options->config, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); +} + +void log_status(cos_status_t *s) { + cos_warn_log("status->code: %d", s->code); + if (s->error_code) cos_warn_log("status->error_code: %s", s->error_code); + if (s->error_msg) cos_warn_log("status->error_msg: %s", s->error_msg); + if (s->req_id) cos_warn_log("status->req_id: %s", s->req_id); +} + +void test_sign() { + cos_pool_t *p = NULL; + const unsigned char secret_key[] = "your secret_key"; + const 
unsigned char time_str[] = "1480932292;1481012292"; + unsigned char sign_key[40]; + cos_buf_t *fmt_str; + const char *value = NULL; + const char *uri = "/testfile"; + const char *host = "testbucket-125000000.cn-north.myqcloud.com&range=bytes%3d0-3"; + unsigned char fmt_str_hex[40]; + + cos_pool_create(&p, NULL); + fmt_str = cos_create_buf(p, 1024); + + cos_get_hmac_sha1_hexdigest(sign_key, secret_key, sizeof(secret_key) - 1, time_str, sizeof(time_str) - 1); + char *pstr = apr_pstrndup(p, (char *)sign_key, sizeof(sign_key)); + cos_warn_log("sign_key: %s", pstr); + + // method + value = "get"; + cos_buf_append_string(p, fmt_str, value, strlen(value)); + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + // canonicalized resource(URI) + cos_buf_append_string(p, fmt_str, uri, strlen(uri)); + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + // query-parameters + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + // Host + cos_buf_append_string(p, fmt_str, "host=", sizeof("host=") - 1); + cos_buf_append_string(p, fmt_str, host, strlen(host)); + cos_buf_append_string(p, fmt_str, "\n", sizeof("\n") - 1); + + char *pstr3 = apr_pstrndup(p, (char *)fmt_str->pos, cos_buf_size(fmt_str)); + cos_warn_log("Format string: %s", pstr3); + + // Format-String sha1hash + cos_get_sha1_hexdigest(fmt_str_hex, (unsigned char *)fmt_str->pos, cos_buf_size(fmt_str)); + + char *pstr2 = apr_pstrndup(p, (char *)fmt_str_hex, sizeof(fmt_str_hex)); + cos_warn_log("Format string sha1hash: %s", pstr2); + + cos_pool_destroy(p); +} + +void test_bucket() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_acl_e cos_acl = COS_ACL_PRIVATE; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // create test bucket + s = 
cos_create_bucket(options, &bucket, cos_acl, &resp_headers); + log_status(s); + + // list object (get bucket) + cos_list_object_params_t *list_params = NULL; + list_params = cos_create_list_object_params(p); + cos_str_set(&list_params->encoding_type, "url"); + s = cos_list_object(options, &bucket, list_params, &resp_headers); + log_status(s); + cos_list_object_content_t *content = NULL; + char *line = NULL; + cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) { + line = apr_psprintf(p, "%.*s\t%.*s\t%.*s\n", content->key.len, content->key.data, content->size.len, + content->size.data, content->last_modified.len, content->last_modified.data); + printf("%s", line); + printf("next marker: %s\n", list_params->next_marker.data); + } + cos_list_object_common_prefix_t *common_prefix = NULL; + cos_list_for_each_entry(cos_list_object_common_prefix_t, common_prefix, &list_params->common_prefix_list, node) { + printf("common prefix: %s\n", common_prefix->prefix.data); + } + + // delete bucket + s = cos_delete_bucket(options, &bucket, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_list_objects() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //获取对象列表 + cos_list_object_params_t *list_params = NULL; + cos_list_object_content_t *content = NULL; + list_params = cos_create_list_object_params(p); + s = cos_list_object(options, &bucket, list_params, &resp_headers); + if (cos_status_is_ok(s)) { + printf("list object succeeded\n"); + cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) { + printf("object: %.*s\n", content->key.len, content->key.data); + } + } else { + 
printf("list object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_bucket_lifecycle() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + cos_list_t rule_list; + cos_list_init(&rule_list); + cos_lifecycle_rule_content_t *rule_content = NULL; + + rule_content = cos_create_lifecycle_rule_content(p); + cos_str_set(&rule_content->id, "testrule1"); + cos_str_set(&rule_content->prefix, "abc/"); + cos_str_set(&rule_content->status, "Enabled"); + rule_content->expire.days = 365; + cos_list_add_tail(&rule_content->node, &rule_list); + + rule_content = cos_create_lifecycle_rule_content(p); + cos_str_set(&rule_content->id, "testrule2"); + cos_str_set(&rule_content->prefix, "efg/"); + cos_str_set(&rule_content->status, "Disabled"); + cos_str_set(&rule_content->transition.storage_class, "Standard_IA"); + rule_content->transition.days = 999; + cos_list_add_tail(&rule_content->node, &rule_list); + + rule_content = cos_create_lifecycle_rule_content(p); + cos_str_set(&rule_content->id, "testrule3"); + cos_str_set(&rule_content->prefix, "xxx/"); + cos_str_set(&rule_content->status, "Enabled"); + rule_content->abort.days = 1; + cos_list_add_tail(&rule_content->node, &rule_list); + + s = cos_put_bucket_lifecycle(options, &bucket, &rule_list, &resp_headers); + log_status(s); + + cos_list_t rule_list_ret; + cos_list_init(&rule_list_ret); + s = cos_get_bucket_lifecycle(options, &bucket, &rule_list_ret, &resp_headers); + log_status(s); + + cos_delete_bucket_lifecycle(options, &bucket, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_put_object_with_limit() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = 
NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers = NULL; + cos_table_t *headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + + //上传对象 + cos_str_set(&file, "test_file.bin"); + cos_str_set(&object, TEST_OBJECT_NAME1); + s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers); + if (cos_status_is_ok(s)) { + printf("put object succeeded\n"); + } else { + printf("put object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_get_object_with_limit() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers = NULL; + cos_table_t *headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + + //下载对象 + cos_str_set(&file, "test_file.bin"); + cos_str_set(&object, TEST_OBJECT_NAME1); + s = cos_get_object_to_file(options, &bucket, &object, headers, NULL, &file, &resp_headers); + if (cos_status_is_ok(s)) { + printf("get object succeeded\n"); + } else { + printf("get object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_gen_object_url() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + + cos_pool_create(&p, NULL); + 
options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + printf("url:%s\n", cos_gen_object_url(options, &bucket, &object)); + + cos_pool_destroy(p); +} + +void test_create_dir() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_list_t buffer; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "folder/"); + + //上传文件夹 + cos_list_init(&buffer); + s = cos_put_object_from_buffer(options, &bucket, &object, &buffer, headers, &resp_headers); + if (cos_status_is_ok(s)) { + printf("put object succeeded\n"); + } else { + printf("put object failed\n"); + } + cos_pool_destroy(p); +} + +void test_object() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_list_t buffer; + cos_buf_t *content = NULL; + char *str = "This is my test data."; + cos_string_t file; + int traffic_limit = 0; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + cos_list_init(&buffer); + content = cos_buf_pack(options->pool, str, strlen(str)); + cos_list_add_tail(&content->node, &buffer); + s = cos_put_object_from_buffer(options, &bucket, &object, &buffer, headers, &resp_headers); + log_status(s); + + cos_list_t download_buffer; + cos_list_init(&download_buffer); + if (traffic_limit) { + // 限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + 
headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + } + s = cos_get_object_to_buffer(options, &bucket, &object, headers, NULL, &download_buffer, &resp_headers); + log_status(s); + print_headers(resp_headers); + int64_t len = 0; + int64_t size = 0; + int64_t pos = 0; + cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { len += cos_buf_size(content); } + char *buf = cos_pcalloc(p, (apr_size_t)(len + 1)); + buf[len] = '\0'; + cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { + size = cos_buf_size(content); + memcpy(buf + pos, content->pos, (size_t)size); + pos += size; + } + cos_warn_log("Download data=%s", buf); + + cos_str_set(&file, TEST_OBJECT_NAME4); + cos_str_set(&object, TEST_OBJECT_NAME4); + s = cos_put_object_from_file(options, &bucket, &object, &file, NULL, &resp_headers); + log_status(s); + + cos_str_set(&file, TEST_DOWNLOAD_NAME3); + cos_str_set(&object, TEST_OBJECT_NAME3); + s = cos_get_object_to_file(options, &bucket, &object, NULL, NULL, &file, &resp_headers); + log_status(s); + + cos_str_set(&object, TEST_OBJECT_NAME2); + s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); + log_status(s); + + cos_str_set(&object, TEST_OBJECT_NAME1); + s = cos_delete_object(options, &bucket, &object, &resp_headers); + log_status(s); + + cos_str_set(&object, TEST_OBJECT_NAME3); + s = cos_delete_object(options, &bucket, &object, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_append_object() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //追加上传对象 + cos_str_set(&object, TEST_OBJECT_NAME3); + 
int32_t count = sizeof(TEST_APPEND_NAMES) / sizeof(char *); + int32_t index = 0; + int64_t position = 0; + s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); + if (s->code == 200) { + char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH); + if (content_length_str != NULL) { + position = atol(content_length_str); + } + } + for (; index < count; index++) { + cos_str_set(&file, TEST_APPEND_NAMES[index]); + s = cos_append_object_from_file(options, &bucket, &object, position, &file, NULL, &resp_headers); + log_status(s); + + s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); + if (s->code == 200) { + char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH); + if (content_length_str != NULL) { + position = atol(content_length_str); + } + } + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_head_object() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //获取对象元数据 + cos_str_set(&object, TEST_OBJECT_NAME1); + s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); + print_headers(resp_headers); + if (cos_status_is_ok(s)) { + printf("head object succeeded\n"); + } else { + printf("head object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_check_object_exist() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_object_exist_status_e object_exist; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + 
init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + // 检查对象是否存在 + s = cos_check_object_exist(options, &bucket, &object, headers, &object_exist, &resp_headers); + if (object_exist == COS_OBJECT_NON_EXIST) { + printf("object: %.*s non exist.\n", object.len, object.data); + } else if (object_exist == COS_OBJECT_EXIST) { + printf("object: %.*s exist.\n", object.len, object.data); + } else { + printf("object: %.*s unknown status.\n", object.len, object.data); + log_status(s); + } + + cos_pool_destroy(p); +} + +void test_object_restore() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_status_t *s = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test_restore.dat"); + + cos_object_restore_params_t *restore_params = cos_create_object_restore_params(p); + restore_params->days = 30; + cos_str_set(&restore_params->tier, "Standard"); + s = cos_post_object_restore(options, &bucket, &object, restore_params, NULL, NULL, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void progress_callback(int64_t consumed_bytes, int64_t total_bytes) { + printf("consumed_bytes = %" APR_INT64_T_FMT ", total_bytes = %" APR_INT64_T_FMT "\n", consumed_bytes, total_bytes); +} + +void test_put_object_from_file() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_string_t file; + int traffic_limit = 0; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_table_t *headers = NULL; + if (traffic_limit) { + // 限速值设置范围为819200 - 
838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + } + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&file, TEST_OBJECT_NAME4); + cos_str_set(&object, TEST_OBJECT_NAME4); + s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_put_object_from_file_with_sse() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_string_t file; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_table_t *headers = NULL; + headers = cos_table_make(p, 3); + // apr_table_add(headers, "x-cos-server-side-encryption", "AES256"); + apr_table_add(headers, "x-cos-server-side-encryption-customer-algorithm", "AES256"); + apr_table_add(headers, "x-cos-server-side-encryption-customer-key", "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY="); + apr_table_add(headers, "x-cos-server-side-encryption-customer-key-MD5", "U5L61r7jcwdNvT7frmUG8g=="); + + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&file, "/home/jojoliang/data/test.jpg"); + cos_str_set(&object, "pic"); + + s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers); + log_status(s); + { + int i = 0; + apr_array_header_t *pp = (apr_array_header_t *)apr_table_elts(resp_headers); + for (; i < pp->nelts; i++) { + apr_table_entry_t *ele = (apr_table_entry_t *)pp->elts + i; + printf("%s: %s\n", ele->key, ele->val); + } + } + + cos_pool_destroy(p); +} + +void test_get_object_to_file_with_sse() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_string_t file; + cos_table_t 
*headers = NULL; + cos_table_t *params = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 3); + /* + apr_table_add(headers, "x-cos-server-side-encryption", "AES256"); + */ + /* + apr_table_add(headers, "x-cos-server-side-encryption-customer-algorithm", "AES256"); + apr_table_add(headers, "x-cos-server-side-encryption-customer-key", + "MDEyMzQ1Njc4OUFCQ0RFRjAxMjM0NTY3ODlBQkNERUY="); apr_table_add(headers, + "x-cos-server-side-encryption-customer-key-MD5", "U5L61r7jcwdNvT7frmUG8g=="); + */ + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&file, "getfile"); + cos_str_set(&object, TEST_OBJECT_NAME1); + + s = cos_get_object_to_file(options, &bucket, &object, headers, params, &file, &resp_headers); + log_status(s); + + { + int i = 0; + apr_array_header_t *pp = (apr_array_header_t *)apr_table_elts(resp_headers); + for (; i < pp->nelts; i++) { + apr_table_entry_t *ele = (apr_table_entry_t *)pp->elts + i; + printf("%s: %s\n", ele->key, ele->val); + } + } + + cos_pool_destroy(p); +} + +void multipart_upload_file_from_file() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *complete_headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_string_t upload_id; + cos_upload_file_t *upload_file = NULL; + cos_status_t *s = NULL; + cos_list_upload_part_params_t *params = NULL; + cos_list_t complete_part_list; + cos_list_part_content_t *part_content = NULL; + cos_complete_part_content_t *complete_part_content = NULL; + int part_num = 1; + int64_t pos = 0; + int64_t file_length = 0; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 1); + complete_headers = cos_table_make(p, 1); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, 
TEST_MULTIPART_OBJECT); + + // init mulitipart + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, headers, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Init multipart upload succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Init multipart upload failed\n"); + cos_pool_destroy(p); + return; + } + + // upload part from file + int res = COSE_OK; + cos_file_buf_t *fb = cos_create_file_buf(p); + res = cos_open_file_for_all_read(p, TEST_MULTIPART_FILE, fb); + if (res != COSE_OK) { + cos_error_log("Open read file fail, filename:%s\n", TEST_MULTIPART_FILE); + return; + } + file_length = fb->file_last; + apr_file_close(fb->file); + while (pos < file_length) { + upload_file = cos_create_upload_file(p); + cos_str_set(&upload_file->filename, TEST_MULTIPART_FILE); + upload_file->file_pos = pos; + pos += 2 * 1024 * 1024; + upload_file->file_last = pos < file_length ? pos : file_length; // 2MB + s = cos_upload_part_from_file(options, &bucket, &object, &upload_id, part_num++, upload_file, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Multipart upload part from file succeeded\n"); + } else { + printf("Multipart upload part from file failed\n"); + } + } + + // list part + params = cos_create_list_upload_part_params(p); + params->max_ret = 1000; + cos_list_init(&complete_part_list); + s = cos_list_upload_part(options, &bucket, &object, &upload_id, params, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("List multipart succeeded\n"); + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n", part_content->part_number.data, + part_content->size.data, part_content->last_modified.data, part_content->etag.data); + } + } else { + printf("List multipart failed\n"); + cos_pool_destroy(p); + return; + } + + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + 
complete_part_content = cos_create_complete_part_content(p); + cos_str_set(&complete_part_content->part_number, part_content->part_number.data); + cos_str_set(&complete_part_content->etag, part_content->etag.data); + cos_list_add_tail(&complete_part_content->node, &complete_part_list); + } + + // complete multipart + s = cos_complete_multipart_upload(options, &bucket, &object, &upload_id, &complete_part_list, complete_headers, + &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Complete multipart upload from file succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Complete multipart upload from file failed\n"); + } + + cos_pool_destroy(p); +} + +void multipart_upload_file_from_buffer() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *complete_headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_string_t upload_id; + cos_status_t *s = NULL; + cos_list_t complete_part_list; + cos_complete_part_content_t *complete_part_content = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 1); + complete_headers = cos_table_make(p, 1); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT); + + // init mulitipart + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, headers, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Init multipart upload succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Init multipart upload failed\n"); + cos_pool_destroy(p); + return; + } + + // upload part from buffer + char *str = "This is my test data...."; + cos_list_t buffer; + cos_buf_t *content; + + // 上传一个分块 + cos_list_init(&buffer); + content = cos_buf_pack(p, str, strlen(str)); + cos_list_add_tail(&content->node, &buffer); + s = 
cos_upload_part_from_buffer(options, &bucket, &object, &upload_id, 1, &buffer, &resp_headers); + + // 直接获取etag + char *etag = apr_pstrdup(p, (char *)apr_table_get(resp_headers, "ETag")); + cos_list_init(&complete_part_list); + complete_part_content = cos_create_complete_part_content(p); + cos_str_set(&complete_part_content->part_number, "1"); + cos_str_set(&complete_part_content->etag, etag); + cos_list_add_tail(&complete_part_content->node, &complete_part_list); + + // 也可以通过 list part 获取取etag + /* + //list part + params = cos_create_list_upload_part_params(p); + params->max_ret = 1000; + cos_list_init(&complete_part_list); + s = cos_list_upload_part(options, &bucket, &object, &upload_id, + params, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("List multipart succeeded\n"); + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n", + part_content->part_number.data, + part_content->size.data, + part_content->last_modified.data, + part_content->etag.data); + } + } else { + printf("List multipart failed\n"); + cos_pool_destroy(p); + return; + } + + cos_list_for_each_entry(cos_list_part_content_t, part_content, ¶ms->part_list, node) { + complete_part_content = cos_create_complete_part_content(p); + cos_str_set(&complete_part_content->part_number, part_content->part_number.data); + cos_str_set(&complete_part_content->etag, part_content->etag.data); + cos_list_add_tail(&complete_part_content->node, &complete_part_list); + } + */ + + // complete multipart + s = cos_complete_multipart_upload(options, &bucket, &object, &upload_id, &complete_part_list, complete_headers, + &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Complete multipart upload from file succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Complete multipart upload from file failed\n"); + } + + cos_pool_destroy(p); +} + +void abort_multipart_upload() { + 
cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_string_t upload_id; + cos_status_t *s = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + headers = cos_table_make(p, 1); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT); + + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, headers, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Init multipart upload succeeded, upload_id:%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Init multipart upload failed\n"); + cos_pool_destroy(p); + return; + } + + s = cos_abort_multipart_upload(options, &bucket, &object, &upload_id, &resp_headers); + + if (cos_status_is_ok(s)) { + printf("Abort multipart upload succeeded, upload_id::%.*s\n", upload_id.len, upload_id.data); + } else { + printf("Abort multipart upload failed\n"); + } + + cos_pool_destroy(p); +} + +void list_multipart() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + int is_cname = 0; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_status_t *s = NULL; + cos_list_multipart_upload_params_t *list_multipart_params = NULL; + cos_list_upload_part_params_t *list_upload_param = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + list_multipart_params = cos_create_list_multipart_upload_params(p); + list_multipart_params->max_ret = 999; + s = cos_list_multipart_upload(options, &bucket, list_multipart_params, &resp_headers); + log_status(s); + + list_upload_param = cos_create_list_upload_part_params(p); + list_upload_param->max_ret = 1000; + cos_string_t upload_id; + cos_str_set(&upload_id, 
"149373379126aee264fecbf5fe8ddb8b9cd23b76c73ab1af0bcfd50683cc4254f81ebe2386"); + cos_str_set(&object, TEST_MULTIPART_OBJECT); + s = cos_list_upload_part(options, &bucket, &object, &upload_id, list_upload_param, &resp_headers); + if (cos_status_is_ok(s)) { + printf("List upload part succeeded, upload_id::%.*s\n", upload_id.len, upload_id.data); + cos_list_part_content_t *part_content = NULL; + cos_list_for_each_entry(cos_list_part_content_t, part_content, &list_upload_param->part_list, node) { + printf("part_number = %s, size = %s, last_modified = %s, etag = %s\n", part_content->part_number.data, + part_content->size.data, part_content->last_modified.data, part_content->etag.data); + } + } else { + printf("List upload part failed\n"); + } + + cos_pool_destroy(p); +} + +void test_resumable() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t filepath; + cos_resumable_clt_params_t *clt_params; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT4); + cos_str_set(&filepath, TEST_DOWNLOAD_NAME4); + + clt_params = cos_create_resumable_clt_params_content(p, 5 * 1024 * 1024, 3, COS_FALSE, NULL); + s = cos_resumable_download_file(options, &bucket, &object, &filepath, NULL, NULL, clt_params, NULL); + log_status(s); + + cos_pool_destroy(p); +} + +void test_resumable_upload_with_multi_threads() { + cos_pool_t *p = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t filename; + cos_status_t *s = NULL; + int is_cname = 0; + cos_table_t *headers = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_resumable_clt_params_t *clt_params; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + 
headers = cos_table_make(p, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_MULTIPART_OBJECT4); + cos_str_set(&filename, TEST_MULTIPART_FILE); + + // upload + clt_params = cos_create_resumable_clt_params_content(p, 1024 * 1024, 8, COS_FALSE, NULL); + s = cos_resumable_upload_file(options, &bucket, &object, &filename, headers, NULL, clt_params, NULL, &resp_headers, + NULL); + + if (cos_status_is_ok(s)) { + printf("upload succeeded\n"); + } else { + printf("upload failed\n"); + } + + cos_pool_destroy(p); +} + +void test_delete_objects() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_string_t bucket; + cos_status_t *s = NULL; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + char *object_name1 = TEST_OBJECT_NAME2; + char *object_name2 = TEST_OBJECT_NAME3; + cos_object_key_t *content1 = NULL; + cos_object_key_t *content2 = NULL; + cos_list_t object_list; + cos_list_t deleted_object_list; + int is_quiet = COS_TRUE; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + cos_list_init(&object_list); + cos_list_init(&deleted_object_list); + content1 = cos_create_cos_object_key(p); + cos_str_set(&content1->key, object_name1); + cos_list_add_tail(&content1->node, &object_list); + content2 = cos_create_cos_object_key(p); + cos_str_set(&content2->key, object_name2); + cos_list_add_tail(&content2->node, &object_list); + + s = cos_delete_objects(options, &bucket, &object_list, is_quiet, &resp_headers, &deleted_object_list); + log_status(s); + + cos_pool_destroy(p); + + if (cos_status_is_ok(s)) { + printf("delete objects succeeded\n"); + } else { + printf("delete objects failed\n"); + } +} + +void test_delete_objects_by_prefix() { + cos_pool_t *p = NULL; + cos_request_options_t *options = NULL; + int is_cname = 0; + cos_string_t bucket; + cos_status_t *s = NULL; + cos_string_t prefix; + char *prefix_str = ""; 
+ + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&prefix, prefix_str); + + s = cos_delete_objects_by_prefix(options, &bucket, &prefix); + log_status(s); + cos_pool_destroy(p); + + printf("test_delete_object_by_prefix ok\n"); +} + +void test_acl() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_acl_e cos_acl = COS_ACL_PRIVATE; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.txt"); + + // put acl + cos_string_t read; + cos_str_set(&read, "id=\"qcs::cam::uin/12345:uin/12345\", id=\"qcs::cam::uin/45678:uin/45678\""); + s = cos_put_bucket_acl(options, &bucket, cos_acl, &read, NULL, NULL, &resp_headers); + log_status(s); + + // get acl + cos_acl_params_t *acl_params = NULL; + acl_params = cos_create_acl_params(p); + s = cos_get_bucket_acl(options, &bucket, acl_params, &resp_headers); + log_status(s); + printf("acl owner id:%s, name:%s\n", acl_params->owner_id.data, acl_params->owner_name.data); + cos_acl_grantee_content_t *acl_content = NULL; + cos_list_for_each_entry(cos_acl_grantee_content_t, acl_content, &acl_params->grantee_list, node) { + printf("acl grantee type:%s, id:%s, name:%s, permission:%s\n", acl_content->type.data, acl_content->id.data, + acl_content->name.data, acl_content->permission.data); + } + + // put acl + s = cos_put_object_acl(options, &bucket, &object, cos_acl, &read, NULL, NULL, &resp_headers); + log_status(s); + + // get acl + cos_acl_params_t *acl_params2 = NULL; + acl_params2 = cos_create_acl_params(p); + s = cos_get_object_acl(options, &bucket, &object, acl_params2, &resp_headers); + log_status(s); + printf("acl owner 
id:%s, name:%s\n", acl_params2->owner_id.data, acl_params2->owner_name.data); + acl_content = NULL; + cos_list_for_each_entry(cos_acl_grantee_content_t, acl_content, &acl_params2->grantee_list, node) { + printf("acl grantee id:%s, name:%s, permission:%s\n", acl_content->id.data, acl_content->name.data, + acl_content->permission.data); + } + + cos_pool_destroy(p); +} + +void test_copy() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t src_bucket; + cos_string_t src_object; + cos_string_t src_endpoint; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //设置对象复制 + cos_str_set(&object, TEST_OBJECT_NAME2); + cos_str_set(&src_bucket, TEST_BUCKET_NAME); + cos_str_set(&src_endpoint, TEST_COS_ENDPOINT); + cos_str_set(&src_object, TEST_OBJECT_NAME1); + + cos_copy_object_params_t *params = NULL; + params = cos_create_copy_object_params(p); + s = cos_copy_object(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object, NULL, params, &resp_headers); + if (cos_status_is_ok(s)) { + printf("put object copy succeeded\n"); + } else { + printf("put object copy failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_modify_storage_class() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t src_bucket; + cos_string_t src_object; + cos_string_t src_endpoint; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + cos_str_set(&src_bucket, TEST_BUCKET_NAME); + 
cos_str_set(&src_endpoint, TEST_COS_ENDPOINT); + cos_str_set(&src_object, TEST_OBJECT_NAME1); + + // 设置x-cos-metadata-directive和x-cos-storage-class头域(替换为自己要更改的存储类型) + cos_table_t *headers = cos_table_make(p, 2); + apr_table_add(headers, "x-cos-metadata-directive", "Replaced"); + // 存储类型包括NTELLIGENT_TIERING,MAZ_INTELLIGENT_TIERING,STANDARD_IA,ARCHIVE,DEEP_ARCHIVE + apr_table_add(headers, "x-cos-storage-class", "ARCHIVE"); + + cos_copy_object_params_t *params = NULL; + params = cos_create_copy_object_params(p); + s = cos_copy_object(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object, headers, params, + &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_copy_mt() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t src_bucket; + cos_string_t src_object; + cos_string_t src_endpoint; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test_copy.txt"); + cos_str_set(&src_bucket, "mybucket-1253685564"); + cos_str_set(&src_endpoint, "cn-south.myqcloud.com"); + cos_str_set(&src_object, "test.txt"); + + s = cos_upload_object_by_part_copy_mt(options, &src_bucket, &src_object, &src_endpoint, &bucket, &object, 1024 * 1024, + 8, NULL); + log_status(s); + + cos_pool_destroy(p); +} + +void test_copy_with_part_copy() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t copy_source; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test_copy.txt"); + cos_str_set(©_source, "mybucket-1253685564.cn-south.myqcloud.com/test.txt"); + + s = 
cos_upload_object_by_part_copy(options, ©_source, &bucket, &object, 1024 * 1024); + log_status(s); + + cos_pool_destroy(p); +} + +void make_rand_string(cos_pool_t *p, int len, cos_string_t *data) { + char *str = NULL; + int i = 0; + str = (char *)cos_palloc(p, len + 1); + for (; i < len; i++) { + str[i] = 'a' + rand() % 32; + } + str[len] = '\0'; + cos_str_set(data, str); +} + +unsigned long get_file_size(const char *file_path) { + unsigned long filesize = -1; + struct stat statbuff; + + if (stat(file_path, &statbuff) < 0) { + return filesize; + } else { + filesize = statbuff.st_size; + } + + return filesize; +} + +void test_part_copy() { + cos_pool_t *p = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + int is_cname = 0; + cos_string_t upload_id; + cos_list_upload_part_params_t *list_upload_part_params = NULL; + cos_upload_part_copy_params_t *upload_part_copy_params1 = NULL; + cos_upload_part_copy_params_t *upload_part_copy_params2 = NULL; + cos_table_t *headers = NULL; + cos_table_t *query_params = NULL; + cos_table_t *resp_headers = NULL; + cos_table_t *list_part_resp_headers = NULL; + cos_list_t complete_part_list; + cos_list_part_content_t *part_content = NULL; + cos_complete_part_content_t *complete_content = NULL; + cos_table_t *complete_resp_headers = NULL; + cos_status_t *s = NULL; + int part1 = 1; + int part2 = 2; + char *local_filename = "test_upload_part_copy.file"; + char *download_filename = "test_upload_part_copy.file.download"; + char *source_object_name = "cos_test_upload_part_copy_source_object"; + char *dest_object_name = "cos_test_upload_part_copy_dest_object"; + FILE *fd = NULL; + cos_string_t download_file; + cos_string_t dest_bucket; + cos_string_t dest_object; + int64_t range_start1 = 0; + int64_t range_end1 = 6000000; + int64_t range_start2 = 6000001; + int64_t range_end2; + cos_string_t data; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + + // 
create multipart upload local file + make_rand_string(p, 10 * 1024 * 1024, &data); + fd = fopen(local_filename, "w"); + fwrite(data.data, sizeof(data.data[0]), data.len, fd); + fclose(fd); + + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, source_object_name); + cos_str_set(&file, local_filename); + s = cos_put_object_from_file(options, &bucket, &object, &file, NULL, &resp_headers); + log_status(s); + + // init mulitipart + cos_str_set(&object, dest_object_name); + s = cos_init_multipart_upload(options, &bucket, &object, &upload_id, NULL, &resp_headers); + log_status(s); + + // upload part copy 1 + upload_part_copy_params1 = cos_create_upload_part_copy_params(p); + cos_str_set(&upload_part_copy_params1->copy_source, + "bucket-appid.cn-south.myqcloud.com/cos_test_upload_part_copy_source_object"); + cos_str_set(&upload_part_copy_params1->dest_bucket, TEST_BUCKET_NAME); + cos_str_set(&upload_part_copy_params1->dest_object, dest_object_name); + cos_str_set(&upload_part_copy_params1->upload_id, upload_id.data); + upload_part_copy_params1->part_num = part1; + upload_part_copy_params1->range_start = range_start1; + upload_part_copy_params1->range_end = range_end1; + headers = cos_table_make(p, 0); + s = cos_upload_part_copy(options, upload_part_copy_params1, headers, &resp_headers); + log_status(s); + printf("last modified:%s, etag:%s\n", upload_part_copy_params1->rsp_content->last_modify.data, + upload_part_copy_params1->rsp_content->etag.data); + + // upload part copy 2 + resp_headers = NULL; + range_end2 = get_file_size(local_filename) - 1; + upload_part_copy_params2 = cos_create_upload_part_copy_params(p); + cos_str_set(&upload_part_copy_params2->copy_source, + "bucket-appid.cn-south.myqcloud.com/cos_test_upload_part_copy_source_object"); + cos_str_set(&upload_part_copy_params2->dest_bucket, TEST_BUCKET_NAME); + cos_str_set(&upload_part_copy_params2->dest_object, dest_object_name); + 
cos_str_set(&upload_part_copy_params2->upload_id, upload_id.data); + upload_part_copy_params2->part_num = part2; + upload_part_copy_params2->range_start = range_start2; + upload_part_copy_params2->range_end = range_end2; + headers = cos_table_make(p, 0); + s = cos_upload_part_copy(options, upload_part_copy_params2, headers, &resp_headers); + log_status(s); + printf("last modified:%s, etag:%s\n", upload_part_copy_params1->rsp_content->last_modify.data, + upload_part_copy_params1->rsp_content->etag.data); + + // list part + list_upload_part_params = cos_create_list_upload_part_params(p); + list_upload_part_params->max_ret = 10; + cos_list_init(&complete_part_list); + + cos_str_set(&dest_bucket, TEST_BUCKET_NAME); + cos_str_set(&dest_object, dest_object_name); + s = cos_list_upload_part(options, &dest_bucket, &dest_object, &upload_id, list_upload_part_params, + &list_part_resp_headers); + log_status(s); + cos_list_for_each_entry(cos_list_part_content_t, part_content, &list_upload_part_params->part_list, node) { + complete_content = cos_create_complete_part_content(p); + cos_str_set(&complete_content->part_number, part_content->part_number.data); + cos_str_set(&complete_content->etag, part_content->etag.data); + cos_list_add_tail(&complete_content->node, &complete_part_list); + } + + // complete multipart + headers = cos_table_make(p, 0); + s = cos_complete_multipart_upload(options, &dest_bucket, &dest_object, &upload_id, &complete_part_list, headers, + &complete_resp_headers); + log_status(s); + + // check upload copy part content equal to local file + headers = cos_table_make(p, 0); + cos_str_set(&download_file, download_filename); + s = cos_get_object_to_file(options, &dest_bucket, &dest_object, headers, query_params, &download_file, &resp_headers); + log_status(s); + printf("local file len = %" APR_INT64_T_FMT ", download file len = %" APR_INT64_T_FMT, get_file_size(local_filename), + get_file_size(download_filename)); + remove(download_filename); + 
remove(local_filename); + cos_pool_destroy(p); + + printf("test part copy ok\n"); +} + +void test_cors() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + cos_list_t rule_list; + cos_list_init(&rule_list); + cos_cors_rule_content_t *rule_content = NULL; + + rule_content = cos_create_cors_rule_content(p); + cos_str_set(&rule_content->id, "testrule1"); + cos_str_set(&rule_content->allowed_origin, "http://www.qq1.com"); + cos_str_set(&rule_content->allowed_method, "GET"); + cos_str_set(&rule_content->allowed_header, "*"); + cos_str_set(&rule_content->expose_header, "xxx"); + rule_content->max_age_seconds = 3600; + cos_list_add_tail(&rule_content->node, &rule_list); + + rule_content = cos_create_cors_rule_content(p); + cos_str_set(&rule_content->id, "testrule2"); + cos_str_set(&rule_content->allowed_origin, "http://www.qq2.com"); + cos_str_set(&rule_content->allowed_method, "GET"); + cos_str_set(&rule_content->allowed_header, "*"); + cos_str_set(&rule_content->expose_header, "yyy"); + rule_content->max_age_seconds = 7200; + cos_list_add_tail(&rule_content->node, &rule_list); + + rule_content = cos_create_cors_rule_content(p); + cos_str_set(&rule_content->id, "testrule3"); + cos_str_set(&rule_content->allowed_origin, "http://www.qq3.com"); + cos_str_set(&rule_content->allowed_method, "GET"); + cos_str_set(&rule_content->allowed_header, "*"); + cos_str_set(&rule_content->expose_header, "zzz"); + rule_content->max_age_seconds = 60; + cos_list_add_tail(&rule_content->node, &rule_list); + + // put cors + s = cos_put_bucket_cors(options, &bucket, &rule_list, &resp_headers); + log_status(s); + + // get cors + cos_list_t rule_list_ret; + cos_list_init(&rule_list_ret); + s = 
cos_get_bucket_cors(options, &bucket, &rule_list_ret, &resp_headers); + log_status(s); + cos_cors_rule_content_t *content = NULL; + cos_list_for_each_entry(cos_cors_rule_content_t, content, &rule_list_ret, node) { + printf( + "cors id:%s, allowed_origin:%s, allowed_method:%s, allowed_header:%s, expose_header:%s, max_age_seconds:%d\n", + content->id.data, content->allowed_origin.data, content->allowed_method.data, content->allowed_header.data, + content->expose_header.data, content->max_age_seconds); + } + + // delete cors + cos_delete_bucket_cors(options, &bucket, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_versioning() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + cos_versioning_content_t *versioning = NULL; + versioning = cos_create_versioning_content(p); + cos_str_set(&versioning->status, "Suspended"); + + // put bucket versioning + s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + + // get bucket versioning + cos_str_set(&versioning->status, ""); + s = cos_get_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + printf("bucket versioning status: %s\n", versioning->status.data); + + cos_pool_destroy(p); +} + +void test_replication() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_request_options_t *dst_options = NULL; + cos_string_t bucket; + cos_string_t dst_bucket; + cos_table_t *resp_headers = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&dst_bucket, 
"replicationtest"); + + dst_options = cos_request_options_create(p); + init_test_request_options(dst_options, is_cname); + cos_str_set(&dst_options->config->endpoint, "cn-east.myqcloud.com"); + + // enable bucket versioning + cos_versioning_content_t *versioning = NULL; + versioning = cos_create_versioning_content(p); + cos_str_set(&versioning->status, "Enabled"); + s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + s = cos_put_bucket_versioning(dst_options, &dst_bucket, versioning, &resp_headers); + log_status(s); + + cos_replication_params_t *replication_param = NULL; + replication_param = cos_create_replication_params(p); + cos_str_set(&replication_param->role, "qcs::cam::uin/100000616666:uin/100000616666"); + + cos_replication_rule_content_t *rule = NULL; + rule = cos_create_replication_rule_content(p); + cos_str_set(&rule->id, "Rule_01"); + cos_str_set(&rule->status, "Enabled"); + cos_str_set(&rule->prefix, "test1"); + cos_str_set(&rule->dst_bucket, "qcs:id/0:cos:cn-east:appid/1253686666:replicationtest"); + cos_list_add_tail(&rule->node, &replication_param->rule_list); + + rule = cos_create_replication_rule_content(p); + cos_str_set(&rule->id, "Rule_02"); + cos_str_set(&rule->status, "Disabled"); + cos_str_set(&rule->prefix, "test2"); + cos_str_set(&rule->storage_class, "Standard_IA"); + cos_str_set(&rule->dst_bucket, "qcs:id/0:cos:cn-east:appid/1253686666:replicationtest"); + cos_list_add_tail(&rule->node, &replication_param->rule_list); + + rule = cos_create_replication_rule_content(p); + cos_str_set(&rule->id, "Rule_03"); + cos_str_set(&rule->status, "Enabled"); + cos_str_set(&rule->prefix, "test3"); + cos_str_set(&rule->storage_class, "Standard_IA"); + cos_str_set(&rule->dst_bucket, "qcs:id/0:cos:cn-east:appid/1253686666:replicationtest"); + cos_list_add_tail(&rule->node, &replication_param->rule_list); + + // put bucket replication + s = cos_put_bucket_replication(options, &bucket, replication_param, 
&resp_headers); + log_status(s); + + // get bucket replication + cos_replication_params_t *replication_param2 = NULL; + replication_param2 = cos_create_replication_params(p); + s = cos_get_bucket_replication(options, &bucket, replication_param2, &resp_headers); + log_status(s); + printf("ReplicationConfiguration role: %s\n", replication_param2->role.data); + cos_replication_rule_content_t *content = NULL; + cos_list_for_each_entry(cos_replication_rule_content_t, content, &replication_param2->rule_list, node) { + printf("ReplicationConfiguration rule, id:%s, status:%s, prefix:%s, dst_bucket:%s, storage_class:%s\n", + content->id.data, content->status.data, content->prefix.data, content->dst_bucket.data, + content->storage_class.data); + } + + // delete bucket replication + s = cos_delete_bucket_replication(options, &bucket, &resp_headers); + log_status(s); + + // disable bucket versioning + cos_str_set(&versioning->status, "Suspended"); + s = cos_put_bucket_versioning(options, &bucket, versioning, &resp_headers); + log_status(s); + s = cos_put_bucket_versioning(dst_options, &dst_bucket, versioning, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void test_presigned_url() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t presigned_url; + cos_table_t *params = NULL; + cos_table_t *headers = NULL; + int sign_host = 1; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + cos_gen_presigned_url(options, &bucket, &object, 300, HTTP_GET, &presigned_url); + printf("presigned_url: %s\n", presigned_url.data); + + // 添加您自己的params和headers + params = cos_table_make(options->pool, 0); + // cos_table_add(params, "param1", "value"); + headers = cos_table_make(options->pool, 0); + // cos_table_add(headers, "header1", 
"value"); + + // 强烈建议sign_host为1,这样强制把host头域加入签名列表,防止越权访问问题 + cos_gen_presigned_url_safe(options, &bucket, &object, 300, HTTP_GET, headers, params, sign_host, &presigned_url); + printf("presigned_url_safe: %s\n", presigned_url.data); + + cos_pool_destroy(p); +} + +void test_head_bucket() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + + cos_str_set(&bucket, TEST_BUCKET_NAME); + options->ctl = cos_http_controller_create(options->pool, 0); + + status = cos_head_bucket(options, &bucket, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +void test_check_bucket_exist() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + cos_bucket_exist_status_e bucket_exist; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // 检查桶是否存在 + status = cos_check_bucket_exist(options, &bucket, &bucket_exist, &resp_headers); + if (bucket_exist == COS_BUCKET_NON_EXIST) { + printf("bucket: %.*s non exist.\n", bucket.len, bucket.data); + } else if (bucket_exist == COS_BUCKET_EXIST) { + printf("bucket: %.*s exist.\n", bucket.len, bucket.data); + } else { + printf("bucket: %.*s unknown status.\n", bucket.len, bucket.data); + log_status(status); + } + + cos_pool_destroy(pool); +} + +void test_get_service() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_get_service_params_t *list_params = 
NULL; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + + //创建get service参数, 默认获取全部bucket + list_params = cos_create_get_service_params(options->pool); + //若将all_region设置为0,则只根据options->config->endpoint的区域进行查询 + // list_params->all_region = 0; + + status = cos_get_service(options, list_params, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + cos_get_service_content_t *content = NULL; + char *line = NULL; + cos_list_for_each_entry(cos_get_service_content_t, content, &list_params->bucket_list, node) { + line = apr_psprintf(options->pool, "%.*s\t%.*s\t%.*s\n", content->bucket_name.len, content->bucket_name.data, + content->location.len, content->location.data, content->creation_date.len, + content->creation_date.data); + printf("%s", line); + } + + cos_pool_destroy(pool); +} + +void test_website() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_website_params_t *website_params = NULL; + cos_website_params_t *website_result = NULL; + cos_website_rule_content_t *website_content = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //创建website参数 + website_params = cos_create_website_params(options->pool); + cos_str_set(&website_params->index, "index.html"); + cos_str_set(&website_params->redirect_protocol, "https"); + cos_str_set(&website_params->error_document, "Error.html"); + + website_content = cos_create_website_rule_content(options->pool); + 
cos_str_set(&website_content->condition_errcode, "404"); + cos_str_set(&website_content->redirect_protocol, "https"); + cos_str_set(&website_content->redirect_replace_key, "404.html"); + cos_list_add_tail(&website_content->node, &website_params->rule_list); + + website_content = cos_create_website_rule_content(options->pool); + cos_str_set(&website_content->condition_prefix, "docs/"); + cos_str_set(&website_content->redirect_protocol, "https"); + cos_str_set(&website_content->redirect_replace_key_prefix, "documents/"); + cos_list_add_tail(&website_content->node, &website_params->rule_list); + + website_content = cos_create_website_rule_content(options->pool); + cos_str_set(&website_content->condition_prefix, "img/"); + cos_str_set(&website_content->redirect_protocol, "https"); + cos_str_set(&website_content->redirect_replace_key, "demo.jpg"); + cos_list_add_tail(&website_content->node, &website_params->rule_list); + + status = cos_put_bucket_website(options, &bucket, website_params, &resp_headers); + log_status(status); + + website_result = cos_create_website_params(options->pool); + status = cos_get_bucket_website(options, &bucket, website_result, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + cos_website_rule_content_t *content = NULL; + char *line = NULL; + line = apr_psprintf(options->pool, "%.*s\n", website_result->index.len, website_result->index.data); + printf("index: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", website_result->redirect_protocol.len, + website_result->redirect_protocol.data); + printf("redirect protocol: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", website_result->error_document.len, website_result->error_document.data); + printf("error document: %s", line); + cos_list_for_each_entry(cos_website_rule_content_t, content, &website_result->rule_list, node) { + line = apr_psprintf(options->pool, "%.*s\t%.*s\t%.*s\t%.*s\t%.*s\n", 
content->condition_errcode.len, + content->condition_errcode.data, content->condition_prefix.len, content->condition_prefix.data, + content->redirect_protocol.len, content->redirect_protocol.data, + content->redirect_replace_key.len, content->redirect_replace_key.data, + content->redirect_replace_key_prefix.len, content->redirect_replace_key_prefix.data); + printf("%s", line); + } + + status = cos_delete_bucket_website(options, &bucket, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +void test_domain() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_domain_params_t *domain_params = NULL; + cos_domain_params_t *domain_result = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //创建domain参数 + domain_params = cos_create_domain_params(options->pool); + cos_str_set(&domain_params->status, "ENABLED"); + cos_str_set(&domain_params->name, "www.abc.com"); + cos_str_set(&domain_params->type, "REST"); + cos_str_set(&domain_params->forced_replacement, "CNAME"); + + status = cos_put_bucket_domain(options, &bucket, domain_params, &resp_headers); + log_status(status); + + domain_result = cos_create_domain_params(options->pool); + status = cos_get_bucket_domain(options, &bucket, domain_result, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + char *line = NULL; + line = apr_psprintf(options->pool, "%.*s\n", domain_result->status.len, domain_result->status.data); + printf("status: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", domain_result->name.len, 
domain_result->name.data); + printf("name: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", domain_result->type.len, domain_result->type.data); + printf("type: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", domain_result->forced_replacement.len, + domain_result->forced_replacement.data); + printf("forced_replacement: %s", line); + + cos_pool_destroy(pool); +} + +void test_logging() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_logging_params_t *params = NULL; + cos_logging_params_t *result = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //创建logging参数 + params = cos_create_logging_params(options->pool); + cos_str_set(¶ms->target_bucket, TEST_BUCKET_NAME); + cos_str_set(¶ms->target_prefix, "logging/"); + + status = cos_put_bucket_logging(options, &bucket, params, &resp_headers); + log_status(status); + + result = cos_create_logging_params(options->pool); + status = cos_get_bucket_logging(options, &bucket, result, &resp_headers); + log_status(status); + if (!cos_status_is_ok(status)) { + cos_pool_destroy(pool); + return; + } + + //查看结果 + char *line = NULL; + line = apr_psprintf(options->pool, "%.*s\n", result->target_bucket.len, result->target_bucket.data); + printf("target bucket: %s", line); + line = apr_psprintf(options->pool, "%.*s\n", result->target_prefix.len, result->target_prefix.data); + printf("target prefix: %s", line); + + cos_pool_destroy(pool); +} + +void test_inventory() { + cos_pool_t *pool = NULL; + int is_cname = 0; + int inum = 3, i, len; + char buf[inum][32]; + char dest_bucket[128]; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + 
cos_inventory_params_t *get_params = NULL; + cos_inventory_optional_t *optional = NULL; + cos_list_inventory_params_t *list_params = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // put bucket inventory + len = snprintf(dest_bucket, 128, "qcs::cos:%s::%s", TEST_REGION, TEST_BUCKET_NAME); + dest_bucket[len] = 0; + for (i = 0; i < inum; i++) { + cos_inventory_params_t *params = cos_create_inventory_params(pool); + cos_inventory_optional_t *optional; + len = snprintf(buf[i], 32, "id%d", i); + buf[i][len] = 0; + cos_str_set(¶ms->id, buf[i]); + cos_str_set(¶ms->is_enabled, "true"); + cos_str_set(¶ms->frequency, "Daily"); + cos_str_set(¶ms->filter_prefix, "myPrefix"); + cos_str_set(¶ms->included_object_versions, "All"); + cos_str_set(¶ms->destination.format, "CSV"); + cos_str_set(¶ms->destination.account_id, TEST_UIN); + cos_str_set(¶ms->destination.bucket, dest_bucket); + cos_str_set(¶ms->destination.prefix, "invent"); + params->destination.encryption = 1; + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "Size"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "LastModifiedDate"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "ETag"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "StorageClass"); + cos_list_add_tail(&optional->node, ¶ms->fields); + optional = cos_create_inventory_optional(pool); + cos_str_set(&optional->field, "ReplicationStatus"); + cos_list_add_tail(&optional->node, ¶ms->fields); + + status = 
cos_put_bucket_inventory(options, &bucket, params, &resp_headers); + log_status(status); + } + + // get inventory + get_params = cos_create_inventory_params(pool); + cos_str_set(&get_params->id, buf[inum / 2]); + status = cos_get_bucket_inventory(options, &bucket, get_params, &resp_headers); + log_status(status); + + printf("id: %s\nis_enabled: %s\nfrequency: %s\nfilter_prefix: %s\nincluded_object_versions: %s\n", + get_params->id.data, get_params->is_enabled.data, get_params->frequency.data, get_params->filter_prefix.data, + get_params->included_object_versions.data); + printf("destination:\n"); + printf("\tencryption: %d\n", get_params->destination.encryption); + printf("\tformat: %s\n", get_params->destination.format.data); + printf("\taccount_id: %s\n", get_params->destination.account_id.data); + printf("\tbucket: %s\n", get_params->destination.bucket.data); + printf("\tprefix: %s\n", get_params->destination.prefix.data); + cos_list_for_each_entry(cos_inventory_optional_t, optional, &get_params->fields, node) { + printf("field: %s\n", optional->field.data); + } + + // list inventory + list_params = cos_create_list_inventory_params(pool); + status = cos_list_bucket_inventory(options, &bucket, list_params, &resp_headers); + log_status(status); + + get_params = NULL; + cos_list_for_each_entry(cos_inventory_params_t, get_params, &list_params->inventorys, node) { + printf("id: %s\nis_enabled: %s\nfrequency: %s\nfilter_prefix: %s\nincluded_object_versions: %s\n", + get_params->id.data, get_params->is_enabled.data, get_params->frequency.data, get_params->filter_prefix.data, + get_params->included_object_versions.data); + printf("destination:\n"); + printf("\tencryption: %d\n", get_params->destination.encryption); + printf("\tformat: %s\n", get_params->destination.format.data); + printf("\taccount_id: %s\n", get_params->destination.account_id.data); + printf("\tbucket: %s\n", get_params->destination.bucket.data); + printf("\tprefix: %s\n", 
get_params->destination.prefix.data); + cos_list_for_each_entry(cos_inventory_optional_t, optional, &get_params->fields, node) { + printf("field: %s\n", optional->field.data); + } + } + + // delete inventory + for (i = 0; i < inum; i++) { + cos_string_t id; + cos_str_set(&id, buf[i]); + status = cos_delete_bucket_inventory(options, &bucket, &id, &resp_headers); + log_status(status); + } + + cos_pool_destroy(pool); +} + +void test_bucket_tagging() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_tagging_params_t *params = NULL; + cos_tagging_params_t *result = NULL; + cos_tagging_tag_t *tag = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // put tagging + params = cos_create_tagging_params(pool); + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "age"); + cos_str_set(&tag->value, "18"); + cos_list_add_tail(&tag->node, ¶ms->node); + + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "name"); + cos_str_set(&tag->value, "xiaoming"); + cos_list_add_tail(&tag->node, ¶ms->node); + + status = cos_put_bucket_tagging(options, &bucket, params, &resp_headers); + log_status(status); + + // get tagging + result = cos_create_tagging_params(pool); + status = cos_get_bucket_tagging(options, &bucket, result, &resp_headers); + log_status(status); + + tag = NULL; + cos_list_for_each_entry(cos_tagging_tag_t, tag, &result->node, node) { + printf("taging key: %s\n", tag->key.data); + printf("taging value: %s\n", tag->value.data); + } + + // delete tagging + status = cos_delete_bucket_tagging(options, &bucket, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +void test_object_tagging() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t 
*status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t version_id = cos_string(""); + cos_tagging_params_t *params = NULL; + cos_tagging_params_t *result = NULL; + cos_tagging_tag_t *tag = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, TEST_OBJECT_NAME1); + + // put object tagging + params = cos_create_tagging_params(pool); + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "age"); + cos_str_set(&tag->value, "18"); + cos_list_add_tail(&tag->node, ¶ms->node); + + tag = cos_create_tagging_tag(pool); + cos_str_set(&tag->key, "name"); + cos_str_set(&tag->value, "xiaoming"); + cos_list_add_tail(&tag->node, ¶ms->node); + + status = cos_put_object_tagging(options, &bucket, &object, &version_id, NULL, params, &resp_headers); + log_status(status); + + // get object tagging + result = cos_create_tagging_params(pool); + status = cos_get_object_tagging(options, &bucket, &object, &version_id, NULL, result, &resp_headers); + log_status(status); + + tag = NULL; + cos_list_for_each_entry(cos_tagging_tag_t, tag, &result->node, node) { + printf("taging key: %s\n", tag->key.data); + printf("taging value: %s\n", tag->value.data); + } + + // delete tagging + status = cos_delete_object_tagging(options, &bucket, &object, &version_id, NULL, &resp_headers); + log_status(status); + + cos_pool_destroy(pool); +} + +static void log_get_referer(cos_referer_params_t *result) { + int index = 0; + cos_referer_domain_t *domain; + + cos_warn_log("status: %s", result->status.data); + cos_warn_log("referer_type: %s", result->referer_type.data); + cos_warn_log("empty_refer_config: %s", 
result->empty_refer_config.data); + + cos_list_for_each_entry(cos_referer_domain_t, domain, &result->domain_list, node) { + cos_warn_log("domain index:%d", ++index); + cos_warn_log("domain: %s", domain->domain.data); + } +} + +void test_referer() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_referer_params_t *params = NULL; + cos_referer_domain_t *domain = NULL; + cos_referer_params_t *result = NULL; + + //创建内存池 + cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/32492 + params = cos_create_referer_params(pool); + cos_str_set(¶ms->status, "Enabled"); + cos_str_set(¶ms->referer_type, "White-List"); + cos_str_set(¶ms->empty_refer_config, "Allow"); + domain = cos_create_referer_domain(pool); + cos_str_set(&domain->domain, "www.qq.com"); + cos_list_add_tail(&domain->node, ¶ms->domain_list); + domain = cos_create_referer_domain(pool); + cos_str_set(&domain->domain, "*.tencent.com"); + cos_list_add_tail(&domain->node, ¶ms->domain_list); + + // put referer + status = cos_put_bucket_referer(options, &bucket, params, &resp_headers); + log_status(status); + + // get referer + result = cos_create_referer_params(pool); + status = cos_get_bucket_referer(options, &bucket, result, &resp_headers); + log_status(status); + if (status->code == 200) { + log_get_referer(result); + } + + cos_pool_destroy(pool); +} + +void test_intelligenttiering() { + cos_pool_t *pool = NULL; + int is_cname = 0; + cos_status_t *status = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers = NULL; + cos_string_t bucket; + cos_intelligenttiering_params_t *params = NULL; + cos_intelligenttiering_params_t *result = NULL; + + //创建内存池 + 
cos_pool_create(&pool, NULL); + + //初始化请求选项 + options = cos_request_options_create(pool); + options->config = cos_config_create(options->pool); + + init_test_request_options(options, is_cname); + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // put intelligenttiering + params = cos_create_intelligenttiering_params(pool); + cos_str_set(¶ms->status, "Enabled"); + params->days = 30; + + status = cos_put_bucket_intelligenttiering(options, &bucket, params, &resp_headers); + log_status(status); + + // get intelligenttiering + result = cos_create_intelligenttiering_params(pool); + status = cos_get_bucket_intelligenttiering(options, &bucket, result, &resp_headers); + log_status(status); + + printf("status: %s\n", result->status.data); + printf("days: %d\n", result->days); + cos_pool_destroy(pool); +} + +void test_delete_directory() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers; + int is_truncated = 1; + cos_string_t marker; + cos_list_t deleted_object_list; + int is_quiet = COS_TRUE; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // list object (get bucket) + cos_list_object_params_t *list_params = NULL; + list_params = cos_create_list_object_params(p); + cos_str_set(&list_params->prefix, "folder/"); + cos_str_set(&marker, ""); + while (is_truncated) { + list_params->marker = marker; + s = cos_list_object(options, &bucket, list_params, &resp_headers); + if (!cos_status_is_ok(s)) { + printf("list object failed, req_id:%s\n", s->req_id); + break; + } + + s = cos_delete_objects(options, &bucket, &list_params->object_list, is_quiet, &resp_headers, &deleted_object_list); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("delete objects failed, req_id:%s\n", s->req_id); + } + + 
is_truncated = list_params->truncated; + marker = list_params->next_marker; + } + cos_pool_destroy(p); +} + +void test_list_directory() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers; + int is_truncated = 1; + cos_string_t marker; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // list object (get bucket) + cos_list_object_params_t *list_params = NULL; + list_params = cos_create_list_object_params(p); + // prefix表示列出的object的key以prefix开始 + cos_str_set(&list_params->prefix, "folder/"); + // deliter表示分隔符, 设置为/表示列出当前目录下的object, 设置为空表示列出所有的object + cos_str_set(&list_params->delimiter, "/"); + // 设置最大遍历出多少个对象, 一次listobject最大支持1000 + list_params->max_ret = 1000; + cos_str_set(&marker, ""); + while (is_truncated) { + list_params->marker = marker; + s = cos_list_object(options, &bucket, list_params, &resp_headers); + if (!cos_status_is_ok(s)) { + printf("list object failed, req_id:%s\n", s->req_id); + break; + } + // list_params->object_list 返回列出的object对象。 + cos_list_object_content_t *content = NULL; + cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) { + printf("object: %s\n", content->key.data); + } + // list_params->common_prefix_list 表示被delimiter截断的路径, 如delimter设置为/, common prefix则表示所有子目录的路径 + cos_list_object_common_prefix_t *common_prefix = NULL; + cos_list_for_each_entry(cos_list_object_common_prefix_t, common_prefix, &list_params->common_prefix_list, node) { + printf("common prefix: %s\n", common_prefix->prefix.data); + } + + is_truncated = list_params->truncated; + marker = list_params->next_marker; + } + cos_pool_destroy(p); +} + +void test_list_all_objects() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t 
bucket; + cos_table_t *resp_headers; + int is_truncated = 1; + cos_string_t marker; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // list object (get bucket) + cos_list_object_params_t *list_params = NULL; + list_params = cos_create_list_object_params(p); + // 设置最大遍历出多少个对象, 一次listobject最大支持1000 + list_params->max_ret = 1000; + cos_str_set(&marker, ""); + while (is_truncated) { + list_params->marker = marker; + cos_list_init(&list_params->object_list); + s = cos_list_object(options, &bucket, list_params, &resp_headers); + if (!cos_status_is_ok(s)) { + printf("list object failed, req_id:%s\n", s->req_id); + break; + } + // list_params->object_list 返回列出的object对象。 + cos_list_object_content_t *content = NULL; + cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) { + printf("object: %s\n", content->key.data); + } + + is_truncated = list_params->truncated; + marker = list_params->next_marker; + } + cos_pool_destroy(p); +} + +void test_download_directory() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t file_name; + cos_string_t suffix = cos_string("/"); + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_table_t *params = NULL; + int is_truncated = 1; + cos_string_t marker; + apr_status_t status; + + //初始化请求选项 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // list object (get bucket) + cos_list_object_params_t *list_params = NULL; + list_params = cos_create_list_object_params(p); + cos_str_set(&list_params->prefix, "folder/"); //替换为您自己的目录名称 + cos_str_set(&marker, ""); + while (is_truncated) { + list_params->marker = marker; + s = cos_list_object(options, &bucket, list_params, 
&resp_headers); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("list object failed, req_id:%s\n", s->req_id); + break; + } + cos_list_object_content_t *content = NULL; + cos_list_for_each_entry(cos_list_object_content_t, content, &list_params->object_list, node) { + cos_str_set(&file_name, content->key.data); + if (cos_ends_with(&content->key, &suffix)) { + //如果是目录需要先创建, 0x0755权限可以自己按需修改,参考apr_file_info.h中定义 + status = apr_dir_make(content->key.data, 0x0755, options->pool); + if (status != APR_SUCCESS && !APR_STATUS_IS_EEXIST(status)) { + printf("mkdir: %s failed, status: %d\n", content->key.data, status); + } + } else { + //下载对象到本地目录,这里默认下载在程序运行的当前目录 + s = cos_get_object_to_file(options, &bucket, &content->key, headers, params, &file_name, &resp_headers); + if (!cos_status_is_ok(s)) { + printf("get object[%s] failed, req_id:%s\n", content->key.data, s->req_id); + } + } + } + is_truncated = list_params->truncated; + marker = list_params->next_marker; + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void test_move() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t src_object; + cos_string_t src_endpoint; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + //设置对象复制 + cos_str_set(&object, TEST_OBJECT_NAME1); + cos_str_set(&src_endpoint, TEST_COS_ENDPOINT); + cos_str_set(&src_object, TEST_OBJECT_NAME2); + + cos_copy_object_params_t *params = NULL; + params = cos_create_copy_object_params(p); + s = cos_copy_object(options, &bucket, &src_object, &src_endpoint, &bucket, &object, NULL, params, &resp_headers); + log_status(s); + if (cos_status_is_ok(s)) { + s = cos_delete_object(options, &bucket, &src_object, &resp_headers); + log_status(s); + printf("move object 
succeeded\n"); + } else { + printf("move object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +// 基础图片处理 +void test_ci_base_image_process() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers; + cos_table_t *params = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + params = cos_table_make(p, 1); + apr_table_addn(params, "imageMogr2/thumbnail/!50p", ""); + cos_str_set(&file, "test.jpg"); + cos_str_set(&object, "test.jpg"); + s = cos_get_object_to_file(options, &bucket, &object, NULL, params, &file, &resp_headers); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id); + } + cos_pool_destroy(p); +} + +// 持久化处理 +void test_ci_image_process() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + ci_operation_result_t *results = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.jpg"); + + // 云上数据处理 + headers = cos_table_make(p, 1); + apr_table_addn(headers, "pic-operations", + 
"{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"imageView2/format/png\"}]}"); + s = ci_image_process(options, &bucket, &object, headers, &resp_headers, &results); + log_status(s); + printf("origin key: %s\n", results->origin.key.data); + printf("process key: %s\n", results->object.key.data); + + // 上传时处理 + headers = cos_table_make(p, 1); + apr_table_addn(headers, "pic-operations", + "{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"imageView2/format/png\"}]}"); + cos_str_set(&file, "test.jpg"); + cos_str_set(&object, "test.jpg"); + s = ci_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers, &results); + log_status(s); + printf("origin key: %s\n", results->origin.key.data); + printf("process key: %s\n", results->object.key.data); + + cos_pool_destroy(p); +} + +// 二维码识别 +void test_ci_image_qrcode() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + ci_operation_result_t *results = NULL; + ci_qrcode_info_t *content = NULL; + ci_qrcode_result_t *result2 = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + init_test_request_options(options, is_cname); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.jpg"); + + headers = cos_table_make(p, 1); + apr_table_addn(headers, "pic-operations", + "{\"is_pic_info\":1,\"rules\":[{\"fileid\":\"test.png\",\"rule\":\"QRcode/cover/1\"}]}"); + // 上传时识别 + cos_str_set(&file, "test.jpg"); + cos_str_set(&object, "test.jpg"); + s = ci_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers, &results); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("put object failed\n"); + } + printf("CodeStatus: %d\n", results->object.code_status); + cos_list_for_each_entry(ci_qrcode_info_t, content, 
&results->object.qrcode_info, node) { + printf("CodeUrl: %s\n", content->code_url.data); + printf("Point: %s\n", content->point[0].data); + printf("Point: %s\n", content->point[1].data); + printf("Point: %s\n", content->point[2].data); + printf("Point: %s\n", content->point[3].data); + } + + // 下载时识别 + s = ci_get_qrcode(options, &bucket, &object, 1, NULL, NULL, &resp_headers, &result2); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("get object failed\n"); + } + printf("CodeStatus: %d\n", result2->code_status); + cos_list_for_each_entry(ci_qrcode_info_t, content, &result2->qrcode_info, node) { + printf("CodeUrl: %s\n", content->code_url.data); + printf("Point: %s\n", content->point[0].data); + printf("Point: %s\n", content->point[1].data); + printf("Point: %s\n", content->point[2].data); + printf("Point: %s\n", content->point[3].data); + } + printf("ImageResult: %s\n", result2->result_image.data); + + //销毁内存池 + cos_pool_destroy(p); +} + +// 图片压缩 +void test_ci_image_compression() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers; + cos_table_t *params = NULL; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + params = cos_table_make(p, 1); + apr_table_addn(params, "imageMogr2/format/tpg", ""); + cos_str_set(&object, "test.jpg"); + cos_str_set(&file, "test.tpg"); + s = cos_get_object_to_file(options, &bucket, &object, NULL, 
params, &file, &resp_headers); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id); + } + + params = cos_table_make(p, 1); + apr_table_addn(params, "imageMogr2/format/heif", ""); + cos_str_set(&file, "test.heif"); + s = cos_get_object_to_file(options, &bucket, &object, NULL, params, &file, &resp_headers); + log_status(s); + if (!cos_status_is_ok(s)) { + printf("cos_get_object_to_file fail, req_id:%s\n", s->req_id); + } +} + +static void log_video_auditing_result(ci_video_auditing_job_result_t *result) { + cos_warn_log("jobid: %s", result->jobs_detail.job_id.data); + cos_warn_log("state: %s", result->jobs_detail.state.data); + cos_warn_log("creation_time: %s", result->jobs_detail.creation_time.data); +} + +static void log_get_auditing_result(ci_auditing_job_result_t *result) { + int index = 0; + ci_auditing_snapshot_result_t *snapshot_info; + ci_auditing_audio_section_result_t *audio_section_info; + + cos_warn_log("nonexist_job_ids: %s", result->nonexist_job_ids.data); + cos_warn_log("code: %s", result->jobs_detail.code.data); + cos_warn_log("message: %s", result->jobs_detail.message.data); + cos_warn_log("state: %s", result->jobs_detail.state.data); + cos_warn_log("creation_time: %s", result->jobs_detail.creation_time.data); + cos_warn_log("object: %s", result->jobs_detail.object.data); + cos_warn_log("snapshot_count: %s", result->jobs_detail.snapshot_count.data); + cos_warn_log("result: %d", result->jobs_detail.result); + + cos_warn_log("porn_info.hit_flag: %d", result->jobs_detail.porn_info.hit_flag); + cos_warn_log("porn_info.count: %d", result->jobs_detail.porn_info.count); + cos_warn_log("terrorism_info.hit_flag: %d", result->jobs_detail.terrorism_info.hit_flag); + cos_warn_log("terrorism_info.count: %d", result->jobs_detail.terrorism_info.count); + cos_warn_log("politics_info.hit_flag: %d", result->jobs_detail.politics_info.hit_flag); + cos_warn_log("politics_info.count: %d", 
result->jobs_detail.politics_info.count); + cos_warn_log("ads_info.hit_flag: %d", result->jobs_detail.ads_info.hit_flag); + cos_warn_log("ads_info.count: %d", result->jobs_detail.ads_info.count); + + cos_list_for_each_entry(ci_auditing_snapshot_result_t, snapshot_info, &result->jobs_detail.snapshot_info_list, node) { + cos_warn_log("snapshot index:%d", ++index); + cos_warn_log("snapshot_info->url: %s", snapshot_info->url.data); + cos_warn_log("snapshot_info->snapshot_time: %d", snapshot_info->snapshot_time); + cos_warn_log("snapshot_info->text: %s", snapshot_info->text.data); + + cos_warn_log("snapshot_info->porn_info.hit_flag: %d", snapshot_info->porn_info.hit_flag); + cos_warn_log("snapshot_info->porn_info.score: %d", snapshot_info->porn_info.score); + cos_warn_log("snapshot_info->porn_info.label: %s", snapshot_info->porn_info.label.data); + cos_warn_log("snapshot_info->porn_info.sub_lable: %s", snapshot_info->porn_info.sub_lable.data); + cos_warn_log("snapshot_info->terrorism_info.hit_flag: %d", snapshot_info->terrorism_info.hit_flag); + cos_warn_log("snapshot_info->terrorism_info.score: %d", snapshot_info->terrorism_info.score); + cos_warn_log("snapshot_info->terrorism_info.label: %s", snapshot_info->terrorism_info.label.data); + cos_warn_log("snapshot_info->terrorism_info.sub_lable: %s", snapshot_info->terrorism_info.sub_lable.data); + cos_warn_log("snapshot_info->politics_info.hit_flag: %d", snapshot_info->politics_info.hit_flag); + cos_warn_log("snapshot_info->politics_info.score: %d", snapshot_info->politics_info.score); + cos_warn_log("snapshot_info->politics_info.label: %s", snapshot_info->politics_info.label.data); + cos_warn_log("snapshot_info->politics_info.sub_lable: %s", snapshot_info->politics_info.sub_lable.data); + cos_warn_log("snapshot_info->ads_info.hit_flag: %d", snapshot_info->ads_info.hit_flag); + cos_warn_log("snapshot_info->ads_info.score: %d", snapshot_info->ads_info.score); + cos_warn_log("snapshot_info->ads_info.label: %s", 
snapshot_info->ads_info.label.data); + cos_warn_log("snapshot_info->ads_info.sub_lable: %s", snapshot_info->ads_info.sub_lable.data); + } + + index = 0; + cos_list_for_each_entry(ci_auditing_audio_section_result_t, audio_section_info, + &result->jobs_detail.audio_section_info_list, node) { + cos_warn_log("audio_section index:%d", ++index); + cos_warn_log("audio_section_info->url: %s", audio_section_info->url.data); + cos_warn_log("audio_section_info->text: %s", audio_section_info->text.data); + cos_warn_log("audio_section_info->offset_time: %d", audio_section_info->offset_time); + cos_warn_log("audio_section_info->duration: %d", audio_section_info->duration); + + cos_warn_log("audio_section_info->porn_info.hit_flag: %d", audio_section_info->porn_info.hit_flag); + cos_warn_log("audio_section_info->porn_info.score: %d", audio_section_info->porn_info.score); + cos_warn_log("audio_section_info->porn_info.key_words: %s", audio_section_info->porn_info.key_words.data); + cos_warn_log("audio_section_info->terrorism_info.hit_flag: %d", audio_section_info->terrorism_info.hit_flag); + cos_warn_log("audio_section_info->terrorism_info.score: %d", audio_section_info->terrorism_info.score); + cos_warn_log("audio_section_info->terrorism_info.key_words: %s", audio_section_info->terrorism_info.key_words.data); + cos_warn_log("audio_section_info->politics_info.hit_flag: %d", audio_section_info->politics_info.hit_flag); + cos_warn_log("audio_section_info->politics_info.score: %d", audio_section_info->politics_info.score); + cos_warn_log("audio_section_info->politics_info.key_words: %s", audio_section_info->politics_info.key_words.data); + cos_warn_log("audio_section_info->ads_info.hit_flag: %d", audio_section_info->ads_info.hit_flag); + cos_warn_log("audio_section_info->ads_info.score: %d", audio_section_info->ads_info.score); + cos_warn_log("audio_section_info->ads_info.key_words: %s", audio_section_info->ads_info.key_words.data); + } +} + +static void 
log_media_buckets_result(ci_media_buckets_result_t *result) { + int index = 0; + ci_media_bucket_list_t *media_bucket; + + cos_warn_log("total_count: %d", result->total_count); + cos_warn_log("page_number: %d", result->page_number); + cos_warn_log("page_size: %d", result->page_size); + + cos_list_for_each_entry(ci_media_bucket_list_t, media_bucket, &result->media_bucket_list, node) { + cos_warn_log("media_bucket index:%d", ++index); + cos_warn_log("media_bucket->bucket_id: %s", media_bucket->bucket_id.data); + cos_warn_log("media_bucket->name: %s", media_bucket->name.data); + cos_warn_log("media_bucket->region: %s", media_bucket->region.data); + cos_warn_log("media_bucket->create_time: %s", media_bucket->create_time.data); + } +} + +static void log_media_info_result(ci_media_info_result_t *result) { + // format + cos_warn_log("format.num_stream: %d", result->format.num_stream); + cos_warn_log("format.num_program: %d", result->format.num_program); + cos_warn_log("format.format_name: %s", result->format.format_name.data); + cos_warn_log("format.format_long_name: %s", result->format.format_long_name.data); + cos_warn_log("format.start_time: %f", result->format.start_time); + cos_warn_log("format.duration: %f", result->format.duration); + cos_warn_log("format.bit_rate: %d", result->format.bit_rate); + cos_warn_log("format.size: %d", result->format.size); + + // stream.video + cos_warn_log("stream.video.index: %d", result->stream.video.index); + cos_warn_log("stream.video.codec_name: %s", result->stream.video.codec_name.data); + cos_warn_log("stream.video.codec_long_name: %s", result->stream.video.codec_long_name.data); + cos_warn_log("stream.video.codec_time_base: %s", result->stream.video.codec_time_base.data); + cos_warn_log("stream.video.codec_tag_string: %s", result->stream.video.codec_tag_string.data); + cos_warn_log("stream.video.codec_tag: %s", result->stream.video.codec_tag.data); + cos_warn_log("stream.video.profile: %s", result->stream.video.profile.data); + 
cos_warn_log("stream.video.height: %d", result->stream.video.height); + cos_warn_log("stream.video.width: %d", result->stream.video.width); + cos_warn_log("stream.video.has_b_frame: %d", result->stream.video.has_b_frame); + cos_warn_log("stream.video.ref_frames: %d", result->stream.video.ref_frames); + cos_warn_log("stream.video.sar: %s", result->stream.video.sar.data); + cos_warn_log("stream.video.dar: %s", result->stream.video.dar.data); + cos_warn_log("stream.video.pix_format: %s", result->stream.video.pix_format.data); + cos_warn_log("stream.video.field_order: %s", result->stream.video.field_order.data); + cos_warn_log("stream.video.level: %d", result->stream.video.level); + cos_warn_log("stream.video.fps: %d", result->stream.video.fps); + cos_warn_log("stream.video.avg_fps: %s", result->stream.video.avg_fps.data); + cos_warn_log("stream.video.timebase: %s", result->stream.video.timebase.data); + cos_warn_log("stream.video.start_time: %f", result->stream.video.start_time); + cos_warn_log("stream.video.duration: %f", result->stream.video.duration); + cos_warn_log("stream.video.bit_rate: %f", result->stream.video.bit_rate); + cos_warn_log("stream.video.num_frames: %d", result->stream.video.num_frames); + cos_warn_log("stream.video.language: %s", result->stream.video.language.data); + + // stream.audio + cos_warn_log("stream.audio.index: %d", result->stream.audio.index); + cos_warn_log("stream.audio.codec_name: %s", result->stream.audio.codec_name.data); + cos_warn_log("stream.audio.codec_long_name: %s", result->stream.audio.codec_long_name.data); + cos_warn_log("stream.audio.codec_time_base: %s", result->stream.audio.codec_time_base.data); + cos_warn_log("stream.audio.codec_tag_string: %s", result->stream.audio.codec_tag_string.data); + cos_warn_log("stream.audio.codec_tag: %s", result->stream.audio.codec_tag.data); + cos_warn_log("stream.audio.sample_fmt: %s", result->stream.audio.sample_fmt.data); + cos_warn_log("stream.audio.sample_rate: %d", 
result->stream.audio.sample_rate); + cos_warn_log("stream.audio.channel: %d", result->stream.audio.channel); + cos_warn_log("stream.audio.channel_layout: %s", result->stream.audio.channel_layout.data); + cos_warn_log("stream.audio.timebase: %s", result->stream.audio.timebase.data); + cos_warn_log("stream.audio.start_time: %f", result->stream.audio.start_time); + cos_warn_log("stream.audio.duration: %f", result->stream.audio.duration); + cos_warn_log("stream.audio.bit_rate: %f", result->stream.audio.bit_rate); + cos_warn_log("stream.audio.language: %s", result->stream.audio.language.data); + + // stream.subtitle + cos_warn_log("stream.subtitle.index: %d", result->stream.subtitle.index); + cos_warn_log("stream.subtitle.language: %s", result->stream.subtitle.language.data); +} + +void test_ci_video_auditing() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers; + ci_video_auditing_job_options_t *job_options; + ci_video_auditing_job_result_t *job_result; + ci_auditing_job_result_t *auditing_result; + + // 基本配置 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_CI_ENDPOINT); // https://ci..myqcloud.com + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/47316 + job_options = ci_video_auditing_job_options_create(p); + cos_str_set(&job_options->input_object, "test.mp4"); + cos_str_set(&job_options->job_conf.detect_type, "Porn,Terrorism,Politics,Ads"); + 
cos_str_set(&job_options->job_conf.callback_version, "Detail"); + job_options->job_conf.detect_content = 1; + cos_str_set(&job_options->job_conf.snapshot.mode, "Interval"); + job_options->job_conf.snapshot.time_interval = 1.5; + job_options->job_conf.snapshot.count = 10; + + // 提交一个视频审核任务 + s = ci_create_video_auditing_job(options, &bucket, job_options, NULL, &resp_headers, &job_result); + log_status(s); + if (s->code == 200) { + log_video_auditing_result(job_result); + } + + // 等待视频审核任务完成,此处可修改您的等待时间 + sleep(300); + + // 获取审核任务结果 + s = ci_get_auditing_job(options, &bucket, &job_result->jobs_detail.job_id, NULL, &resp_headers, &auditing_result); + log_status(s); + if (s->code == 200) { + log_get_auditing_result(auditing_result); + } + + // 销毁内存池 + cos_pool_destroy(p); +} + +void test_ci_media_process_media_bucket() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_table_t *resp_headers; + ci_media_buckets_request_t *media_buckets_request; + ci_media_buckets_result_t *media_buckets_result; + + // 基本配置 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_CI_ENDPOINT); // https://ci..myqcloud.com + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/48988 + media_buckets_request = ci_media_buckets_request_create(p); + cos_str_set(&media_buckets_request->regions, ""); + cos_str_set(&media_buckets_request->bucket_names, ""); + cos_str_set(&media_buckets_request->bucket_name, ""); + cos_str_set(&media_buckets_request->page_number, "1"); + 
cos_str_set(&media_buckets_request->page_size, "10"); + s = ci_describe_media_buckets(options, media_buckets_request, NULL, &resp_headers, &media_buckets_result); + log_status(s); + if (s->code == 200) { + log_media_buckets_result(media_buckets_result); + } + + // 销毁内存池 + cos_pool_destroy(p); +} + +void test_ci_media_process_snapshot() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers; + cos_list_t download_buffer; + cos_string_t object; + ci_get_snapshot_request_t *snapshot_request; + cos_buf_t *content = NULL; + cos_string_t pic_file = cos_string("snapshot.jpg"); + + // 基本配置 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.mp4"); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/55671 + snapshot_request = ci_snapshot_request_create(p); + snapshot_request->time = 7.5; + snapshot_request->width = 0; + snapshot_request->height = 0; + cos_str_set(&snapshot_request->format, "jpg"); + cos_str_set(&snapshot_request->rotate, "auto"); + cos_str_set(&snapshot_request->mode, "exactframe"); + cos_list_init(&download_buffer); + + s = ci_get_snapshot_to_buffer(options, &bucket, &object, snapshot_request, NULL, &download_buffer, &resp_headers); + log_status(s); + + int64_t len = 0; + int64_t size = 0; + int64_t pos = 0; + cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { len += cos_buf_size(content); } + char *buf = 
cos_pcalloc(p, (apr_size_t)(len + 1)); + buf[len] = '\0'; + cos_list_for_each_entry(cos_buf_t, content, &download_buffer, node) { + size = cos_buf_size(content); + memcpy(buf + pos, content->pos, (size_t)size); + pos += size; + } + cos_warn_log("Download len:%ld data=%s", len, buf); + + s = ci_get_snapshot_to_file(options, &bucket, &object, snapshot_request, NULL, &pic_file, &resp_headers); + log_status(s); + + // 销毁内存池 + cos_pool_destroy(p); +} + +void test_ci_media_process_media_info() { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_table_t *resp_headers; + ci_media_info_result_t *media_info; + cos_string_t object; + + // 基本配置 + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + options->config = cos_config_create(options->pool); + cos_str_set(&options->config->endpoint, TEST_COS_ENDPOINT); + cos_str_set(&options->config->access_key_id, TEST_ACCESS_KEY_ID); + cos_str_set(&options->config->access_key_secret, TEST_ACCESS_KEY_SECRET); + cos_str_set(&options->config->appid, TEST_APPID); + options->config->is_cname = is_cname; + options->ctl = cos_http_controller_create(options->pool, 0); + cos_str_set(&bucket, TEST_BUCKET_NAME); + cos_str_set(&object, "test.mp4"); + + // 替换为您的配置信息,可参见文档 https://cloud.tencent.com/document/product/436/55672 + s = ci_get_media_info(options, &bucket, &object, NULL, &resp_headers, &media_info); + log_status(s); + if (s->code == 200) { + log_media_info_result(media_info); + } + + // 销毁内存池 + cos_pool_destroy(p); +} + +int main(int argc, char *argv[]) { + // 通过环境变量获取 SECRETID 和 SECRETKEY + // TEST_ACCESS_KEY_ID = getenv("COS_SECRETID"); + // TEST_ACCESS_KEY_SECRET = getenv("COS_SECRETKEY"); + + if (cos_http_io_initialize(NULL, 0) != COSE_OK) { + exit(1); + } + + // set log level, default COS_LOG_WARN + cos_log_set_level(COS_LOG_WARN); + + // set log output, default stderr + cos_log_set_output(NULL); + + // 
test_intelligenttiering(); + // test_bucket_tagging(); + // test_object_tagging(); + // test_referer(); + // test_logging(); + // test_inventory(); + // test_put_object_from_file_with_sse(); + // test_get_object_to_file_with_sse(); + // test_head_bucket(); + // test_check_bucket_exist(); + // test_get_service(); + // test_website(); + // test_domain(); + // test_delete_objects(); + // test_delete_objects_by_prefix(); + // test_bucket(); + // test_bucket_lifecycle(); + // test_object_restore(); + test_put_object_from_file(); + // test_sign(); + // test_object(); + // test_put_object_with_limit(); + // test_get_object_with_limit(); + // test_head_object(); + // test_gen_object_url(); + // test_list_objects(); + // test_list_directory(); + // test_list_all_objects(); + // test_create_dir(); + // test_append_object(); + // test_check_object_exist(); + // multipart_upload_file_from_file(); + // multipart_upload_file_from_buffer(); + // abort_multipart_upload(); + // list_multipart(); + + // pthread_t tid[20]; + // test_resumable_upload_with_multi_threads(); + // test_resumable(); + // test_bucket(); + // test_acl(); + // test_copy(); + // test_modify_storage_class(); + // test_cors(); + // test_versioning(); + // test_replication(); + // test_part_copy(); + // test_copy_with_part_copy(); + // test_move(); + // test_delete_directory(); + // test_download_directory(); + // test_presigned_url(); + // test_ci_base_image_process(); + // test_ci_image_process(); + // test_ci_image_qrcode(); + // test_ci_image_compression(); + // test_ci_video_auditing(); + // test_ci_media_process_media_bucket(); + // test_ci_media_process_snapshot(); + // test_ci_media_process_media_info(); + + // cos_http_io_deinitialize last + cos_http_io_deinitialize(); + + return 0; +} diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 0546ed7f47..1f6d0800a5 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -14,8 +14,8 @@ */ #define _DEFAULT_SOURCE 
-#include "os.h" #include "tglobal.h" +#include "os.h" #include "tconfig.h" #include "tgrant.h" #include "tlog.h" @@ -63,7 +63,7 @@ int32_t tsNumOfQnodeFetchThreads = 1; int32_t tsNumOfSnodeStreamThreads = 4; int32_t tsNumOfSnodeWriteThreads = 1; int32_t tsMaxStreamBackendCache = 128; // M -int32_t tsPQSortMemThreshold = 16; // M +int32_t tsPQSortMemThreshold = 16; // M // sync raft int32_t tsElectInterval = 25 * 1000; @@ -121,8 +121,8 @@ int32_t tsQueryPolicy = 1; int32_t tsQueryRspPolicy = 0; int64_t tsQueryMaxConcurrentTables = 200; // unit is TSDB_TABLE_NUM_UNIT bool tsEnableQueryHb = true; -bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true -bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true +bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true +bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true int32_t tsQuerySmaOptimize = 0; int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from which level to query rsma data. 
bool tsQueryPlannerTrace = false; @@ -235,6 +235,13 @@ int64_t tsCheckpointInterval = 3 * 60 * 60 * 1000; bool tsFilterScalarMode = false; int32_t tsKeepTimeOffset = 0; // latency of data migration +char tsS3Endpoint[TSDB_FQDN_LEN] = ""; +char tsS3AcessKeyId[TSDB_FQDN_LEN] = ""; +char tsS3AcessKeySecret[TSDB_FQDN_LEN] = ""; +char tsS3BucketName[TSDB_FQDN_LEN] = ""; +char tsS3AppId[TSDB_FQDN_LEN] = ""; +int8_t tsS3Enabled = false; + #ifndef _STORAGE int32_t taosSetTfsCfg(SConfig *pCfg) { SConfigItem *pItem = cfgGetItem(pCfg, "dataDir"); @@ -256,7 +263,9 @@ int32_t taosSetTfsCfg(SConfig *pCfg) { int32_t taosSetTfsCfg(SConfig *pCfg); #endif -struct SConfig *taosGetCfg() { return tsCfg; } +struct SConfig *taosGetCfg() { + return tsCfg; +} static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *inputCfgDir, const char *envFile, char *apolloUrl) { @@ -376,7 +385,9 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, CFG_SCOPE_CLIENT) != 0) return -1; + if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, + CFG_SCOPE_CLIENT) != 0) + return -1; if (cfgAddInt32(pCfg, "metaCacheMaxSize", tsMetaCacheMaxSize, -1, INT32_MAX, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddInt32(pCfg, "slowLogThreshold", tsSlowLogThreshold, 0, INT32_MAX, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddString(pCfg, "slowLogScope", "", CFG_SCOPE_CLIENT) != 0) return -1; @@ -389,7 +400,8 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "numOfRpcSessions", tsNumOfRpcSessions, 1, 100000, CFG_SCOPE_BOTH) != 0) return -1; 
tsTimeToGetAvailableConn = TRANGE(tsTimeToGetAvailableConn, 20, 10000000); - if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, CFG_SCOPE_BOTH) != 0) return -1; + if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, CFG_SCOPE_BOTH) != 0) + return -1; tsNumOfTaskQueueThreads = tsNumOfCores / 2; tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4); @@ -449,7 +461,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "statusInterval", tsStatusInterval, 1, 30, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 1, 1000000, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, CFG_SCOPE_CLIENT) != 0) return -1; - if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, + CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddInt32(pCfg, "countAlwaysReturnValue", tsCountAlwaysReturnValue, 0, 1, CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "queryBufferSize", tsQueryBufferSize, -1, 500000000000, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddBool(pCfg, "printAuth", tsPrintAuth, CFG_SCOPE_SERVER) != 0) return -1; @@ -477,7 +491,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4); if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 4, 1024, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 100, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 100, CFG_SCOPE_SERVER) != 0) + return -1; tsNumOfVnodeFetchThreads = tsNumOfCores / 4; tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 
4); @@ -497,7 +512,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfSnodeStreamThreads = tsNumOfCores / 4; tsNumOfSnodeStreamThreads = TRANGE(tsNumOfSnodeStreamThreads, 2, 4); - if (cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER) != 0) + return -1; tsNumOfSnodeWriteThreads = tsNumOfCores / 4; tsNumOfSnodeWriteThreads = TRANGE(tsNumOfSnodeWriteThreads, 2, 4); @@ -505,14 +521,18 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsRpcQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1; tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL); - if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_BOTH) != 0) + if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, + CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) + return -1; + if (cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) + return -1; - if (cfgAddInt64(pCfg, "vndCommitMaxInterval", tsVndCommitMaxIntervalMs, 1000, 1000 * 60 * 60, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt64(pCfg, "vndCommitMaxInterval", tsVndCommitMaxIntervalMs, 1000, 1000 * 60 * 60, CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddInt64(pCfg, 
"mndSdbWriteDelta", tsMndSdbWriteDelta, 20, 10000, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt64(pCfg, "mndLogRetention", tsMndLogRetention, 500, 10000, CFG_SCOPE_SERVER) != 0) return -1; @@ -542,7 +562,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "queryRsmaTolerance", tsQueryRsmaTolerance, 0, 900000, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt64(pCfg, "walFsyncDataSizeLimit", tsWalFsyncDataSizeLimit, 100 * 1024 * 1024, INT64_MAX, CFG_SCOPE_SERVER) != 0) + if (cfgAddInt64(pCfg, "walFsyncDataSizeLimit", tsWalFsyncDataSizeLimit, 100 * 1024 * 1024, INT64_MAX, + CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddBool(pCfg, "udf", tsStartUdfd, CFG_SCOPE_SERVER) != 0) return -1; @@ -553,13 +574,16 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt64(pCfg, "streamBufferSize", tsStreamBufferSize, 0, INT64_MAX, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt64(pCfg, "checkpointInterval", tsCheckpointInterval, 0, INT64_MAX, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddBool(pCfg, "filterScalarMode", tsFilterScalarMode, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "keepTimeOffset", tsKeepTimeOffset, 0, 23, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER) != 0) return -1; + GRANT_CFG_ADD; return 0; } @@ -908,7 +932,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { 
tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN); tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32; - tmqMaxTopicNum= cfgGetItem(pCfg, "tmqMaxTopicNum")->i32; + tmqMaxTopicNum = cfgGetItem(pCfg, "tmqMaxTopicNum")->i32; tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32; tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32; @@ -948,6 +972,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32; tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32; + tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); + GRANT_CFG_GET; return 0; } @@ -1020,7 +1046,7 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { taosSetCoreDump(enableCore); } else if (strcasecmp("enableQueryHb", name) == 0) { tsEnableQueryHb = cfgGetItem(pCfg, "enableQueryHb")->bval; - } else if (strcasecmp("ttlChangeOnWrite", name) == 0) { + } else if (strcasecmp("ttlChangeOnWrite", name) == 0) { tsTtlChangeOnWrite = cfgGetItem(pCfg, "ttlChangeOnWrite")->bval; } break; @@ -1249,9 +1275,9 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { // tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval; // } else if (strcasecmp("smlBatchSize", name) == 0) { // tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32; - } else if(strcasecmp("smlTsDefaultName", name) == 0) { + } else if (strcasecmp("smlTsDefaultName", name) == 0) { tstrncpy(tsSmlTsDefaultName, cfgGetItem(pCfg, "smlTsDefaultName")->str, TSDB_COL_NAME_LEN); - } else if(strcasecmp("smlDot2Underline", name) == 0) { + } else if (strcasecmp("smlDot2Underline", name) == 0) { tsSmlDot2Underline = cfgGetItem(pCfg, "smlDot2Underline")->bval; } else if (strcasecmp("shellActivityTimer", name) == 0) { tsShellActivityTimer = cfgGetItem(pCfg, "shellActivityTimer")->i32; @@ -1272,6 +1298,8 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { 
taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp); snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port); cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype); + } else if (strcasecmp("s3BucketName", name) == 0) { + tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); } else if (strcasecmp("sDebugFlag", name) == 0) { sDebugFlag = cfgGetItem(pCfg, "sDebugFlag")->i32; } else if (strcasecmp("smaDebugFlag", name) == 0) { diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 194ffa16f6..0612f924f5 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -8,6 +8,7 @@ set( "src/vnd/vnodeCommit.c" "src/vnd/vnodeQuery.c" "src/vnd/vnodeModule.c" + "src/vnd/vnodeCos.c" "src/vnd/vnodeSvr.c" "src/vnd/vnodeSync.c" "src/vnd/vnodeSnapshot.c" @@ -134,6 +135,11 @@ else() endif() endif() +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) + target_link_libraries( vnode PUBLIC os @@ -153,6 +159,13 @@ target_link_libraries( PUBLIC transport PUBLIC stream PUBLIC index + + # s3 + cos_c_sdk + ${APR_UTIL_LIBRARY} + ${APR_LIBRARY} + ${MINIXML_LIBRARY} + ${CURL_LIBRARY} ) IF (TD_GRANT) @@ -169,7 +182,20 @@ if(${BUILD_WITH_ROCKSDB}) add_definitions(-DUSE_ROCKSDB) endif(${BUILD_WITH_ROCKSDB}) - +# s3 +FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) +IF (APR_CONFIG_BIN) + EXECUTE_PROCESS( + COMMAND ${APR_CONFIG_BIN} --includedir + OUTPUT_VARIABLE APR_INCLUDE_DIR + OUTPUT_STRIP_TRAILING_WHITESPACE + ) +ENDIF() +include_directories (${APR_INCLUDE_DIR}) +target_include_directories( + vnode + PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" + ) if(${BUILD_TEST}) add_subdirectory(test) diff --git 
a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h new file mode 100644 index 0000000000..b8510213d7 --- /dev/null +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef _TD_VND_COS_H_ +#define _TD_VND_COS_H_ + +#include "vnd.h" + +#ifdef __cplusplus +extern "C" { +#endif + +extern int8_t tsS3Enabled; + +int32_t s3Init(); +void s3CleanUp(); +void s3PutObjectFromFile(const char *file, const char *object); +void s3DeleteObjects(const char *object_name[], int nobject); + +#ifdef __cplusplus +} +#endif + +#endif /*_TD_VND_COS_H_*/ diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c index a4d5715083..ebe20c0e85 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRetention.c +++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c @@ -15,6 +15,7 @@ #include "tsdb.h" #include "tsdbFS2.h" +#include "vndCos.h" typedef struct { STsdb *tsdb; @@ -41,6 +42,28 @@ static int32_t tsdbDoRemoveFileObject(SRTNer *rtner, const STFileObj *fobj) { return TARRAY2_APPEND(rtner->fopArr, op); } +static int32_t tsdbRemoveFileObjectS3(SRTNer *rtner, const STFileObj *fobj) { + int32_t code = 0, lino = 0; + + STFileOp op = { + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + const char *object_name = taosDirEntryBaseName((char 
*)fobj->fname); + s3DeleteObjects(&object_name, 1); + +_exit: + if (code) { + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + } + return code; +} + static int32_t tsdbDoCopyFile(SRTNer *rtner, const STFileObj *from, const STFile *to) { int32_t code = 0; int32_t lino = 0; @@ -76,6 +99,33 @@ _exit: return code; } +static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile *to) { + int32_t code = 0; + int32_t lino = 0; + + char fname[TSDB_FILENAME_LEN]; + TdFilePtr fdFrom = NULL; + TdFilePtr fdTo = NULL; + + tsdbTFileName(rtner->tsdb, to, fname); + + fdFrom = taosOpenFile(from->fname, TD_FILE_READ); + if (fdFrom == NULL) code = terrno; + TSDB_CHECK_CODE(code, lino, _exit); + + char *object_name = taosDirEntryBaseName(fname); + s3PutObjectFromFile(from->fname, object_name); + + taosCloseFile(&fdFrom); + +_exit: + if (code) { + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + taosCloseFile(&fdFrom); + } + return code; +} + static int32_t tsdbDoMigrateFileObj(SRTNer *rtner, const STFileObj *fobj, const SDiskID *did) { int32_t code = 0; int32_t lino = 0; @@ -123,6 +173,53 @@ _exit: return code; } +static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const SDiskID *did) { + int32_t code = 0; + int32_t lino = 0; + STFileOp op = {0}; + + // remove old + op = (STFileOp){ + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + // create new + op = (STFileOp){ + .optype = TSDB_FOP_CREATE, + .fid = fobj->f->fid, + .nf = + { + .type = fobj->f->type, + .did = did[0], + .fid = fobj->f->fid, + .cid = fobj->f->cid, + .size = fobj->f->size, + .stt[0] = + { + .level = fobj->f->stt[0].level, + }, + }, + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + + // do copy the file + code = tsdbCopyFileS3(rtner, fobj, &op.nf); + TSDB_CHECK_CODE(code, lino, _exit); + +_exit: + if (code) 
{ + TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); + } + return code; +} + typedef struct { STsdb *tsdb; int32_t sync; @@ -201,8 +298,14 @@ static int32_t tsdbDoRetention2(void *arg) { for (int32_t ftype = 0; (ftype < TSDB_FTYPE_MAX) && (fobj = rtner->ctx->fset->farr[ftype], 1); ++ftype) { if (fobj == NULL) continue; - code = tsdbDoRemoveFileObject(rtner, fobj); - TSDB_CHECK_CODE(code, lino, _exit); + int32_t nlevel = tfsGetLevel(rtner->tsdb->pVnode->pTfs); + if (tsS3Enabled && nlevel > 1 && TSDB_FTYPE_DATA == ftype && fobj->f->did.level == nlevel - 1) { + code = tsdbRemoveFileObjectS3(rtner, fobj); + TSDB_CHECK_CODE(code, lino, _exit); + } else { + code = tsdbDoRemoveFileObject(rtner, fobj); + TSDB_CHECK_CODE(code, lino, _exit); + } } SSttLvl *lvl; @@ -228,8 +331,15 @@ static int32_t tsdbDoRetention2(void *arg) { if (fobj == NULL) continue; if (fobj->f->did.level == did.level) continue; - code = tsdbDoMigrateFileObj(rtner, fobj, &did); - TSDB_CHECK_CODE(code, lino, _exit); + + int32_t nlevel = tfsGetLevel(rtner->tsdb->pVnode->pTfs); + if (tsS3Enabled && nlevel > 1 && TSDB_FTYPE_DATA == ftype && did.level == nlevel - 1) { + code = tsdbMigrateDataFileS3(rtner, fobj, &did); + TSDB_CHECK_CODE(code, lino, _exit); + } else { + code = tsdbDoMigrateFileObj(rtner, fobj, &did); + TSDB_CHECK_CODE(code, lino, _exit); + } } // stt @@ -281,4 +391,4 @@ int32_t tsdbRetention(STsdb *tsdb, int64_t now, int32_t sync) { tsdbFreeRtnArg(arg); } return code; -} \ No newline at end of file +} diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c new file mode 100644 index 0000000000..1507df7074 --- /dev/null +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -0,0 +1,114 @@ +#define ALLOW_FORBID_FUNC + +#include "vndCos.h" + +#include "cos_api.h" +#include "cos_http_io.h" +#include "cos_log.h" + +extern char tsS3Endpoint[]; +extern char tsS3AcessKeyId[]; +extern char tsS3AcessKeySecret[]; +extern char tsS3BucketName[]; +extern char tsS3AppId[]; + 
+int32_t s3Init() { + if (cos_http_io_initialize(NULL, 0) != COSE_OK) { + return -1; + } + + // set log level, default COS_LOG_WARN + cos_log_set_level(COS_LOG_WARN); + + // set log output, default stderr + cos_log_set_output(NULL); + + return 0; +} + +void s3CleanUp() { cos_http_io_deinitialize(); } + +static void log_status(cos_status_t *s) { + cos_warn_log("status->code: %d", s->code); + if (s->error_code) cos_warn_log("status->error_code: %s", s->error_code); + if (s->error_msg) cos_warn_log("status->error_msg: %s", s->error_msg); + if (s->req_id) cos_warn_log("status->req_id: %s", s->req_id); +} + +static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) { + options->config = cos_config_create(options->pool); + + cos_config_t *config = options->config; + + cos_str_set(&config->endpoint, tsS3Endpoint); + cos_str_set(&config->access_key_id, tsS3AcessKeyId); + cos_str_set(&config->access_key_secret, tsS3AcessKeySecret); + cos_str_set(&config->appid, tsS3AppId); + + config->is_cname = is_cname; + + options->ctl = cos_http_controller_create(options->pool, 0); +} + +void s3PutObjectFromFile(const char *file_str, const char *object_str) { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket, object, file; + cos_table_t *resp_headers; + int traffic_limit = 0; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_table_t *headers = NULL; + if (traffic_limit) { + // 限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + } + cos_str_set(&bucket, tsS3BucketName); + cos_str_set(&file, file_str); + cos_str_set(&object, object_str); + s = cos_put_object_from_file(options, &bucket, &object, &file, headers, &resp_headers); + log_status(s); + + cos_pool_destroy(p); +} + +void s3DeleteObjects(const char 
*object_name[], int nobject) { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_string_t bucket; + cos_table_t *resp_headers = NULL; + cos_request_options_t *options = NULL; + cos_list_t object_list; + cos_list_t deleted_object_list; + int is_quiet = COS_TRUE; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_str_set(&bucket, tsS3BucketName); + + cos_list_init(&object_list); + cos_list_init(&deleted_object_list); + + for (int i = 0; i < nobject; ++i) { + cos_object_key_t *content = cos_create_cos_object_key(p); + cos_str_set(&content->key, object_name[i]); + cos_list_add_tail(&content->node, &object_list); + } + + cos_status_t *s = cos_delete_objects(options, &bucket, &object_list, is_quiet, &resp_headers, &deleted_object_list); + log_status(s); + + cos_pool_destroy(p); + + if (cos_status_is_ok(s)) { + cos_warn_log("delete objects succeeded\n"); + } else { + cos_warn_log("delete objects failed\n"); + } +} diff --git a/source/dnode/vnode/src/vnd/vnodeModule.c b/source/dnode/vnode/src/vnd/vnodeModule.c index 74a8d14a86..6ccce5c9d7 100644 --- a/source/dnode/vnode/src/vnd/vnodeModule.c +++ b/source/dnode/vnode/src/vnd/vnodeModule.c @@ -14,6 +14,7 @@ */ #include "vnd.h" +#include "vndCos.h" typedef struct SVnodeTask SVnodeTask; struct SVnodeTask { @@ -81,6 +82,9 @@ int vnodeInit(int nthreads) { if (tqInit() < 0) { return -1; } + if (s3Init() < 0) { + return -1; + } return 0; } @@ -112,6 +116,7 @@ void vnodeCleanup() { walCleanUp(); tqCleanUp(); smaCleanUp(); + s3CleanUp(); } int vnodeScheduleTaskEx(int tpid, int (*execute)(void*), void* arg) { From ebd09ca532aecdd448dfe6693740270d55150f44 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 4 Aug 2023 13:44:17 +0800 Subject: [PATCH 006/147] tsdb/write: use keep1 as minKey instead of keep2 --- source/dnode/vnode/src/tsdb/tsdbWrite.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbWrite.c 
b/source/dnode/vnode/src/tsdb/tsdbWrite.c index 2dbac956ed..6e89b47adc 100644 --- a/source/dnode/vnode/src/tsdb/tsdbWrite.c +++ b/source/dnode/vnode/src/tsdb/tsdbWrite.c @@ -76,7 +76,7 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq2 *pMsg) { int32_t code = 0; STsdbKeepCfg *pCfg = &pTsdb->keepCfg; TSKEY now = taosGetTimestamp(pCfg->precision); - TSKEY minKey = now - tsTickPerMin[pCfg->precision] * pCfg->keep2; + TSKEY minKey = now - tsTickPerMin[pCfg->precision] * pCfg->keep1; TSKEY maxKey = tsMaxKeyByPrecision[pCfg->precision]; int32_t size = taosArrayGetSize(pMsg->aSubmitTbData); @@ -107,4 +107,4 @@ int tsdbScanAndConvertSubmitMsg(STsdb *pTsdb, SSubmitReq2 *pMsg) { _exit: return code; -} \ No newline at end of file +} From 87783a965082757fb358a5f4ceb8882f6303dfe6 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 4 Aug 2023 15:37:52 +0800 Subject: [PATCH 007/147] s3query/get: pull object to local --- source/dnode/vnode/src/inc/vndCos.h | 3 + .../dnode/vnode/src/tsdb/tsdbReaderWriter.c | 21 +++++- source/dnode/vnode/src/vnd/vnodeCos.c | 75 +++++++++++++++++++ 3 files changed, 96 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h index b8510213d7..d4e19e9031 100644 --- a/source/dnode/vnode/src/inc/vndCos.h +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -28,6 +28,9 @@ int32_t s3Init(); void s3CleanUp(); void s3PutObjectFromFile(const char *file, const char *object); void s3DeleteObjects(const char *object_name[], int nobject); +bool s3Exists(const char *object_name); +void s3Get(const char *object_name, const char *path); +void s3EvictCache(); #ifdef __cplusplus } diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 89b7d019ae..96037ff6be 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -14,6 +14,7 @@ */ #include "tsdb.h" +#include "vndCos.h" // =============== 
PAGE-WISE FILE =============== int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **ppFD) { @@ -34,9 +35,23 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p pFD->flag = flag; pFD->pFD = taosOpenFile(path, flag); if (pFD->pFD == NULL) { - code = TAOS_SYSTEM_ERROR(errno); - taosMemoryFree(pFD); - goto _exit; + const char *object_name = taosDirEntryBaseName((char *)path); + if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3Exists(object_name)) { + s3EvictCache(); + s3Get(object_name, path); + + pFD->pFD = taosOpenFile(path, flag); + + if (pFD->pFD == NULL) { + code = TAOS_SYSTEM_ERROR(errno); + taosMemoryFree(pFD); + goto _exit; + } + } else { + code = TAOS_SYSTEM_ERROR(errno); + taosMemoryFree(pFD); + goto _exit; + } } pFD->szPage = szPage; pFD->pgno = 0; diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index 1507df7074..696632fc6f 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -112,3 +112,78 @@ void s3DeleteObjects(const char *object_name[], int nobject) { cos_warn_log("delete objects failed\n"); } } + +bool s3Exists(const char *object_name) { + bool ret = false; + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers; + cos_table_t *headers = NULL; + cos_object_exist_status_e object_exist; + + cos_pool_create(&p, NULL); + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_str_set(&bucket, tsS3BucketName); + cos_str_set(&object, object_name); + + s = cos_check_object_exist(options, &bucket, &object, headers, &object_exist, &resp_headers); + if (object_exist == COS_OBJECT_NON_EXIST) { + cos_warn_log("object: %.*s non exist.\n", object.len, object.data); + } else if (object_exist == COS_OBJECT_EXIST) { + ret = true; + cos_warn_log("object: %.*s 
exist.\n", object.len, object.data); + } else { + cos_warn_log("object: %.*s unknown status.\n", object.len, object.data); + log_status(s); + } + + cos_pool_destroy(p); + + return ret; +} + +void s3Get(const char *object_name, const char *path) { + cos_pool_t *p = NULL; + int is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_string_t file; + cos_table_t *resp_headers = NULL; + cos_table_t *headers = NULL; + int traffic_limit = 0; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_str_set(&bucket, tsS3BucketName); + if (traffic_limit) { + //限速值设置范围为819200 - 838860800,即100KB/s - 100MB/s,如果超出该范围将返回400错误 + headers = cos_table_make(p, 1); + cos_table_add_int(headers, "x-cos-traffic-limit", 819200); + } + + //下载对象 + cos_str_set(&file, path); + cos_str_set(&object, object_name); + s = cos_get_object_to_file(options, &bucket, &object, headers, NULL, &file, &resp_headers); + if (cos_status_is_ok(s)) { + cos_warn_log("get object succeeded\n"); + } else { + cos_warn_log("get object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); +} + +void s3EvictCache() {} From 65d8af19ed74f697ac0fef7fb007b6cf67a23f9e Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 4 Aug 2023 16:35:35 +0800 Subject: [PATCH 008/147] s3: new api s3Size --- source/dnode/vnode/src/inc/vndCos.h | 4 +- .../dnode/vnode/src/tsdb/tsdbReaderWriter.c | 3 +- source/dnode/vnode/src/vnd/vnodeCos.c | 45 ++++++++++++++++++- 3 files changed, 48 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h index d4e19e9031..6e0984c400 100644 --- a/source/dnode/vnode/src/inc/vndCos.h +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -29,9 +29,9 @@ void s3CleanUp(); void s3PutObjectFromFile(const char *file, const char *object); void s3DeleteObjects(const char *object_name[], int nobject); bool 
s3Exists(const char *object_name); -void s3Get(const char *object_name, const char *path); +bool s3Get(const char *object_name, const char *path); void s3EvictCache(); - +long s3Size(const char *object_name); #ifdef __cplusplus } #endif diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 96037ff6be..872042d9d5 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -36,7 +36,8 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p pFD->pFD = taosOpenFile(path, flag); if (pFD->pFD == NULL) { const char *object_name = taosDirEntryBaseName((char *)path); - if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3Exists(object_name)) { + long s3_size = s3Size(object_name); + if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3_size > 0) { s3EvictCache(); s3Get(object_name, path); diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index 696632fc6f..a7b166b6c7 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -147,7 +147,8 @@ bool s3Exists(const char *object_name) { return ret; } -void s3Get(const char *object_name, const char *path) { +bool s3Get(const char *object_name, const char *path) { + bool ret = false; cos_pool_t *p = NULL; int is_cname = 0; cos_status_t *s = NULL; @@ -177,6 +178,7 @@ void s3Get(const char *object_name, const char *path) { cos_str_set(&object, object_name); s = cos_get_object_to_file(options, &bucket, &object, headers, NULL, &file, &resp_headers); if (cos_status_is_ok(s)) { + ret = true; cos_warn_log("get object succeeded\n"); } else { cos_warn_log("get object failed\n"); @@ -184,6 +186,47 @@ void s3Get(const char *object_name, const char *path) { //销毁内存池 cos_pool_destroy(p); + + return ret; } void s3EvictCache() {} + +long s3Size(const char *object_name) { + long size = 0; + + cos_pool_t *p = NULL; + int 
is_cname = 0; + cos_status_t *s = NULL; + cos_request_options_t *options = NULL; + cos_string_t bucket; + cos_string_t object; + cos_table_t *resp_headers = NULL; + + //创建内存池 + cos_pool_create(&p, NULL); + + //初始化请求选项 + options = cos_request_options_create(p); + s3InitRequestOptions(options, is_cname); + cos_str_set(&bucket, tsS3BucketName); + + //获取对象元数据 + cos_str_set(&object, object_name); + s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); + // print_headers(resp_headers); + if (cos_status_is_ok(s)) { + char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH); + if (content_length_str != NULL) { + size = atol(content_length_str); + } + cos_warn_log("head object succeeded: %ld\n", size); + } else { + cos_warn_log("head object failed\n"); + } + + //销毁内存池 + cos_pool_destroy(p); + + return size; +} From fac7e521e957ae1aaa279af48c0acd04c646817b Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 7 Aug 2023 15:59:37 +0800 Subject: [PATCH 009/147] s3/evict: fetch atime from stat file --- include/os/osFile.h | 2 +- source/dnode/mgmt/mgmt_mnode/src/mmFile.c | 4 +- source/dnode/mgmt/mgmt_vnode/src/vmFile.c | 2 +- source/dnode/mgmt/node_util/src/dmEps.c | 6 +- source/dnode/mgmt/node_util/src/dmFile.c | 2 +- source/dnode/vnode/src/inc/vndCos.h | 3 +- source/dnode/vnode/src/tq/tqOffsetSnapshot.c | 2 +- source/dnode/vnode/src/tsdb/tsdbFS.c | 10 +- .../dnode/vnode/src/tsdb/tsdbReaderWriter.c | 4 +- source/dnode/vnode/src/vnd/vnodeCos.c | 66 +++++- source/libs/function/src/udfd.c | 8 +- source/libs/index/src/indexFstFile.c | 4 +- source/libs/parser/src/parTranslater.c | 207 +++++++++--------- source/libs/sync/src/syncRaftStore.c | 2 +- source/libs/wal/src/walMeta.c | 12 +- source/os/src/osFile.c | 28 ++- source/util/src/tlog.c | 60 ++--- tools/shell/src/shellEngine.c | 100 ++++----- utils/test/c/tmqDemo.c | 3 +- 19 files changed, 300 insertions(+), 225 deletions(-) diff --git a/include/os/osFile.h b/include/os/osFile.h index 
0e93002706..da1f8f8b57 100644 --- a/include/os/osFile.h +++ b/include/os/osFile.h @@ -76,7 +76,7 @@ int32_t taosUnLockFile(TdFilePtr pFile); int32_t taosUmaskFile(int32_t maskVal); -int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime); +int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime); int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno); int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime); bool taosCheckExistFile(const char *pathname); diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c index cb0849f4b9..64e18ef06d 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c @@ -46,7 +46,7 @@ static int32_t mmDecodeOption(SJson *pJson, SMnodeOpt *pOption) { if (code < 0) return -1; tjsonGetInt32ValueFromDouble(replica, "role", pOption->nodeRoles[i], code); if (code < 0) return -1; - if(pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER){ + if (pOption->nodeRoles[i] == TAOS_SYNC_ROLE_VOTER) { pOption->numOfReplicas++; } } @@ -65,7 +65,7 @@ int32_t mmReadFile(const char *path, SMnodeOpt *pOption) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%smnode.json", path, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("mnode file:%s not exist", file); return 0; } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index da7f4d4a56..ed32e75d18 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -97,7 +97,7 @@ int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t SWrapperCfg *pCfgs = NULL; snprintf(file, sizeof(file), "%s%svnodes.json", pMgmt->path, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("vnode file:%s not exist", file); return 0; 
} diff --git a/source/dnode/mgmt/node_util/src/dmEps.c b/source/dnode/mgmt/node_util/src/dmEps.c index 1564a09035..88f6b5da40 100644 --- a/source/dnode/mgmt/node_util/src/dmEps.c +++ b/source/dnode/mgmt/node_util/src/dmEps.c @@ -100,7 +100,7 @@ int32_t dmReadEps(SDnodeData *pData) { goto _OVER; } - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("dnode file:%s not exist", file); code = 0; goto _OVER; @@ -350,7 +350,7 @@ void dmRotateMnodeEpSet(SDnodeData *pData) { } void dmGetMnodeEpSetForRedirect(SDnodeData *pData, SRpcMsg *pMsg, SEpSet *pEpSet) { - if(!pData->validMnodeEps) return; + if (!pData->validMnodeEps) return; dmGetMnodeEpSet(pData, pEpSet); dTrace("msg is redirected, handle:%p num:%d use:%d", pMsg->info.handle, pEpSet->numOfEps, pEpSet->inUse); for (int32_t i = 0; i < pEpSet->numOfEps; ++i) { @@ -469,7 +469,7 @@ static int32_t dmReadDnodePairs(SDnodeData *pData) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%sdnode%sep.json", tsDataDir, TD_DIRSEP, TD_DIRSEP); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dDebug("dnode file:%s not exist", file); code = 0; goto _OVER; diff --git a/source/dnode/mgmt/node_util/src/dmFile.c b/source/dnode/mgmt/node_util/src/dmFile.c index fb05f08c0c..c81efddcc1 100644 --- a/source/dnode/mgmt/node_util/src/dmFile.c +++ b/source/dnode/mgmt/node_util/src/dmFile.c @@ -38,7 +38,7 @@ int32_t dmReadFile(const char *path, const char *name, bool *pDeployed) { char file[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%s%s.json", path, TD_DIRSEP, name); - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { dInfo("file:%s not exist", file); code = 0; goto _OVER; diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h index 6e0984c400..f6db7f096e 100644 --- a/source/dnode/vnode/src/inc/vndCos.h +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -30,8 +30,9 @@ 
void s3PutObjectFromFile(const char *file, const char *object); void s3DeleteObjects(const char *object_name[], int nobject); bool s3Exists(const char *object_name); bool s3Get(const char *object_name, const char *path); -void s3EvictCache(); +void s3EvictCache(const char *path, long object_size); long s3Size(const char *object_name); + #ifdef __cplusplus } #endif diff --git a/source/dnode/vnode/src/tq/tqOffsetSnapshot.c b/source/dnode/vnode/src/tq/tqOffsetSnapshot.c index a4428aed43..6a66da30c6 100644 --- a/source/dnode/vnode/src/tq/tqOffsetSnapshot.c +++ b/source/dnode/vnode/src/tq/tqOffsetSnapshot.c @@ -60,7 +60,7 @@ int32_t tqOffsetSnapRead(STqOffsetReader* pReader, uint8_t** ppData) { } int64_t sz = 0; - if (taosStatFile(fname, &sz, NULL) < 0) { + if (taosStatFile(fname, &sz, NULL, NULL) < 0) { taosCloseFile(&pFile); taosMemoryFree(fname); return -1; diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c index ec116c717e..c0c74d6b87 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS.c @@ -176,7 +176,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // SDelFile if (pTsdb->fs.pDelFile) { tsdbDelFileName(pTsdb, pTsdb->fs.pDelFile, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -195,7 +195,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // head ========= tsdbHeadFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pHeadF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -206,7 +206,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // data ========= tsdbDataFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pDataF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = 
TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -221,7 +221,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // sma ============= tsdbSmaFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pSmaF, fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } @@ -237,7 +237,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) { // stt =========== for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) { tsdbSttFileName(pTsdb, pSet->diskId, pSet->fid, pSet->aSttF[iStt], fname); - if (taosStatFile(fname, &size, NULL)) { + if (taosStatFile(fname, &size, NULL, NULL)) { code = TAOS_SYSTEM_ERROR(errno); TSDB_CHECK_CODE(code, lino, _exit); } diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c index 872042d9d5..4d3b53bc5a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c +++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c @@ -38,7 +38,7 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p const char *object_name = taosDirEntryBaseName((char *)path); long s3_size = s3Size(object_name); if (!strncmp(path + strlen(path) - 5, ".data", 5) && s3_size > 0) { - s3EvictCache(); + s3EvictCache(path, s3_size); s3Get(object_name, path); pFD->pFD = taosOpenFile(path, flag); @@ -66,7 +66,7 @@ int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **p // not check file size when reading data files. 
if (flag != TD_FILE_READ) { - if (taosStatFile(path, &pFD->szFile, NULL) < 0) { + if (taosStatFile(path, &pFD->szFile, NULL, NULL) < 0) { code = TAOS_SYSTEM_ERROR(errno); taosMemoryFree(pFD->pBuf); taosCloseFile(&pFD->pFD); diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index a7b166b6c7..bac38f7c35 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -190,7 +190,71 @@ bool s3Get(const char *object_name, const char *path) { return ret; } -void s3EvictCache() {} +typedef struct { + int64_t size; + int32_t atime; + char name[TSDB_FILENAME_LEN]; +} SEvictFile; + +static int32_t evictFileCompareAsce(const void *pLeft, const void *pRight) { + SEvictFile *lhs = (SEvictFile *)pLeft; + SEvictFile *rhs = (SEvictFile *)pRight; + return lhs->atime < rhs->atime ? -1 : 1; +} + +void s3EvictCache(const char *path, long object_size) { + SDiskSize disk_size = {0}; + if (taosGetDiskSize((char *)path, &disk_size) < 0) { + terrno = TAOS_SYSTEM_ERROR(errno); + vError("failed to get disk:%s size since %s", path, terrstr()); + return; + } + + if (object_size >= disk_size.avail + 1 << 30) { + // evict too old files + // 1, list data files' atime under dir(path) + char dir_name[TSDB_FILENAME_LEN] = "\0"; + tstrncpy(dir_name, path, TSDB_FILENAME_LEN); + taosDirName(dir_name); + + tdbDirPtr pDir = taosOpenDir(dir_name); + if (pDir == NULL) { + terrno = TAOS_SYSTEM_ERROR(errno); + vError("failed to open %s since %s", dir_name, terrstr()); + } + SArray *evict_files = taosArrayInit(16, sizeof(SEvictFile)); + tdbDirEntryPtr pDirEntry; + while ((pDirEntry = taosReadDir(pDir)) != NULL) { + char *name = taosGetDirEntryName(pDirEntry); + if (!strncmp(name + strlen(name) - 5, ".data", 5)) { + SEvictFile e_file = {0}; + + tstrncpy(e_file.name, name, TSDB_FILENAME_LEN); + taosStatFile(name, &e_file.size, NULL, &e_file.atime); + + taosArrayPush(evict_files, &e_file); + } + } + taosCloseDir(&pDir); + + // 2, sort by 
atime + taosArraySort(evict_files, evictFileCompareAsce); + + // 3, remove files ascendingly until we get enough object_size space + long evict_size = 0; + size_t ef_size = TARRAY_SIZE(evict_files); + for (size_t i = 0; i < ef_size; ++i) { + SEvictFile *evict_file = taosArrayGet(evict_files, i); + taosRemoveFile(evict_file->name); + evict_size += evict_file->size; + if (evict_size >= object_size) { + break; + } + } + + taosArrayDestroy(evict_files); + } +} long s3Size(const char *object_name) { long size = 0; diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index 7371017111..575bce09bb 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -378,9 +378,9 @@ int32_t udfdInitializePythonPlugin(SUdfScriptPlugin *plugin) { "pyUdfDestroy", "pyUdfScalarProc", "pyUdfAggStart", "pyUdfAggFinish", "pyUdfAggProc", "pyUdfAggMerge"}; void **funcs[UDFD_MAX_PLUGIN_FUNCS] = { - (void **)&plugin->openFunc, (void **)&plugin->closeFunc, (void **)&plugin->udfInitFunc, - (void **)&plugin->udfDestroyFunc, (void **)&plugin->udfScalarProcFunc, (void **)&plugin->udfAggStartFunc, - (void **)&plugin->udfAggFinishFunc, (void **)&plugin->udfAggProcFunc, (void **)&plugin->udfAggMergeFunc}; + (void **)&plugin->openFunc, (void **)&plugin->closeFunc, (void **)&plugin->udfInitFunc, + (void **)&plugin->udfDestroyFunc, (void **)&plugin->udfScalarProcFunc, (void **)&plugin->udfAggStartFunc, + (void **)&plugin->udfAggFinishFunc, (void **)&plugin->udfAggProcFunc, (void **)&plugin->udfAggMergeFunc}; int32_t err = udfdLoadSharedLib(plugin->libPath, &plugin->lib, funcName, funcs, UDFD_MAX_PLUGIN_FUNCS); if (err != 0) { fnError("can not load python plugin. 
lib path %s", plugin->libPath); @@ -848,7 +848,7 @@ int32_t udfdSaveFuncBodyToFile(SFuncInfo *pFuncInfo, SUdf *udf) { char path[PATH_MAX] = {0}; udfdGetFuncBodyPath(udf, path); - bool fileExist = !(taosStatFile(path, NULL, NULL) < 0); + bool fileExist = !(taosStatFile(path, NULL, NULL, NULL) < 0); if (fileExist) { strncpy(udf->path, path, PATH_MAX); fnInfo("udfd func body file. reuse existing file %s", path); diff --git a/source/libs/index/src/indexFstFile.c b/source/libs/index/src/indexFstFile.c index e18d0bbad3..43f15f5196 100644 --- a/source/libs/index/src/indexFstFile.c +++ b/source/libs/index/src/indexFstFile.c @@ -162,7 +162,7 @@ static FORCE_INLINE int idxFileCtxGetSize(IFileCtx* ctx) { return ctx->offset; } else { int64_t file_size = 0; - taosStatFile(ctx->file.buf, &file_size, NULL); + taosStatFile(ctx->file.buf, &file_size, NULL, NULL); return (int)file_size; } } @@ -199,7 +199,7 @@ IFileCtx* idxFileCtxCreate(WriterType type, const char* path, bool readOnly, int code = taosFtruncateFile(ctx->file.pFile, 0); UNUSED(code); - code = taosStatFile(path, &ctx->file.size, NULL); + code = taosStatFile(path, &ctx->file.size, NULL, NULL); UNUSED(code); ctx->file.wBufOffset = 0; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 8ce68a5c8c..6845496a03 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -31,8 +31,8 @@ #define SYSTABLE_SHOW_TYPE_OFFSET QUERY_NODE_SHOW_DNODES_STMT typedef struct SRewriteTbNameContext { - int32_t errCode; - char* pTbName; + int32_t errCode; + char* pTbName; } SRewriteTbNameContext; typedef struct STranslateContext { @@ -54,7 +54,7 @@ typedef struct STranslateContext { bool stableQuery; bool showRewrite; SNode* pPrevRoot; - SNode* pPostRoot; + SNode* pPostRoot; } STranslateContext; typedef struct SBuildTopicContext { @@ -278,10 +278,11 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { static int32_t 
translateSubquery(STranslateContext* pCxt, SNode* pNode); static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode); static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal); -static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt); -static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery); -static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery); -static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery); +static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt); +static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery); +static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery); +static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery); static bool afterGroupBy(ESqlClause clause) { return clause > SQL_CLAUSE_GROUP_BY; } @@ -772,7 +773,8 @@ static SNodeList* getProjectList(const SNode* pNode) { static bool isTimeLineQuery(SNode* pStmt) { if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { - return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode); + return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || + (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode); } else if (QUERY_NODE_SET_OPERATOR == nodeType(pStmt)) { return TIME_LINE_GLOBAL == ((SSetOperator*)pStmt)->timeLineResMode; } else { @@ -791,7 +793,7 @@ static bool isGlobalTimeLineQuery(SNode* pStmt) { } static bool isTimeLineAlignedQuery(SNode* pStmt) { - SSelectStmt *pSelect = (SSelectStmt *)pStmt; + SSelectStmt* pSelect = (SSelectStmt*)pStmt; if (isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return true; } @@ -801,7 +803,7 @@ static bool isTimeLineAlignedQuery(SNode* pStmt) { 
if (QUERY_NODE_SELECT_STMT != nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return false; } - SSelectStmt *pSub = (SSelectStmt *)((STempTableNode*)pSelect->pFromTable)->pSubquery; + SSelectStmt* pSub = (SSelectStmt*)((STempTableNode*)pSelect->pFromTable)->pSubquery; if (nodesListMatch(pSelect->pPartitionByList, pSub->pPartitionByList)) { return true; } @@ -1394,7 +1396,7 @@ static bool isCountStar(SFunctionNode* pFunc) { } static int32_t rewriteCountStarAsCount1(STranslateContext* pCxt, SFunctionNode* pCount) { - int32_t code = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); if (NULL == pVal) { return TSDB_CODE_OUT_OF_MEMORY; @@ -1596,9 +1598,11 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } - if (pSelect->hasInterpFunc && (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { + if (pSelect->hasInterpFunc && + (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, - "%s ignoring null value options cannot be used when applying to multiple columns", pFunc->functionName); + "%s ignoring null value options cannot be used when applying to multiple columns", + pFunc->functionName); } if (NULL != pSelect->pWindow || NULL != pSelect->pGroupByList) { @@ -1636,7 +1640,8 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu } SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && - !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && !isTimeLineAlignedQuery(pCxt->pCurrStmt)) { + 
!isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && + !isTimeLineAlignedQuery(pCxt->pCurrStmt)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, "%s function requires valid time series input", pFunc->functionName); } @@ -1706,8 +1711,8 @@ static int32_t translateForbidSysTableFunc(STranslateContext* pCxt, SFunctionNod return TSDB_CODE_SUCCESS; } - SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; - SNode* pTable = pSelect->pFromTable; + SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; + SNode* pTable = pSelect->pFromTable; if (NULL != pTable && QUERY_NODE_REAL_TABLE == nodeType(pTable) && TSDB_SYSTEM_TABLE == ((SRealTableNode*)pTable)->pMeta->tableType) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, pFunc->functionName); @@ -2296,7 +2301,8 @@ static EDealRes doCheckExprForGroupBy(SNode** pNode, void* pContext) { } } if (isScanPseudoColumnFunc(*pNode) || QUERY_NODE_COLUMN == nodeType(*pNode)) { - if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { + if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || + (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { return generateDealNodeErrMsg(pCxt, getGroupByErrorCode(pCxt), ((SExprNode*)(*pNode))->userAlias); } else { return rewriteColToSelectValFunc(pCxt, pNode); @@ -2391,14 +2397,14 @@ static int32_t checkHavingGroupBy(STranslateContext* pCxt, SSelectStmt* pSelect) if (NULL != pSelect->pHaving) { code = checkExprForGroupBy(pCxt, &pSelect->pHaving); } -/* - if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pProjectionList) { - code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); - } - if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { - code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pOrderByList); - } -*/ + /* 
+ if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pProjectionList) { + code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); + } + if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { + code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pOrderByList); + } + */ return code; } @@ -2657,10 +2663,10 @@ static int32_t setTableCacheLastMode(STranslateContext* pCxt, SSelectStmt* pSele static EDealRes doTranslateTbName(SNode** pNode, void* pContext) { switch (nodeType(*pNode)) { case QUERY_NODE_FUNCTION: { - SFunctionNode *pFunc = (SFunctionNode *)*pNode; + SFunctionNode* pFunc = (SFunctionNode*)*pNode; if (FUNCTION_TYPE_TBNAME == pFunc->funcType) { - SRewriteTbNameContext *pCxt = (SRewriteTbNameContext*)pContext; - SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); + SRewriteTbNameContext* pCxt = (SRewriteTbNameContext*)pContext; + SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); if (NULL == pVal) { pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; return DEAL_RES_ERROR; @@ -2699,11 +2705,12 @@ static int32_t replaceTbName(STranslateContext* pCxt, SSelectStmt* pSelect) { } SRealTableNode* pTable = (SRealTableNode*)pSelect->pFromTable; - if (TSDB_CHILD_TABLE != pTable->pMeta->tableType && TSDB_NORMAL_TABLE != pTable->pMeta->tableType && TSDB_SYSTEM_TABLE != pTable->pMeta->tableType) { + if (TSDB_CHILD_TABLE != pTable->pMeta->tableType && TSDB_NORMAL_TABLE != pTable->pMeta->tableType && + TSDB_SYSTEM_TABLE != pTable->pMeta->tableType) { return TSDB_CODE_SUCCESS; } - SNode** pNode = NULL; + SNode** pNode = NULL; SRewriteTbNameContext pRewriteCxt = {0}; pRewriteCxt.pTbName = pTable->table.tableName; @@ -3110,7 +3117,8 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList code = scalarCalculateConstants(pCastFunc, &pCell->pNode); } if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) { - code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, 
"Fill value can only accept constant"); + code = + generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant"); } else if (TSDB_CODE_SUCCESS != code) { code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch"); } @@ -3576,7 +3584,6 @@ static int32_t createDefaultEveryNode(STranslateContext* pCxt, SNode** pOutput) pEvery->isDuration = true; pEvery->literal = taosStrdup("1s"); - *pOutput = (SNode*)pEvery; return TSDB_CODE_SUCCESS; } @@ -3671,15 +3678,15 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) { static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelect) { pCxt->currClause = SQL_CLAUSE_PARTITION_BY; int32_t code = TSDB_CODE_SUCCESS; - + if (pSelect->pPartitionByList) { int8_t typeType = getTableTypeFromTableNode(pSelect->pFromTable); SNode* pPar = nodesListGetNode(pSelect->pPartitionByList, 0); - if (!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) && - 1 == pSelect->pPartitionByList->length && (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { + if (!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) && 1 == pSelect->pPartitionByList->length && + (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { pSelect->timeLineResMode = TIME_LINE_MULTI; } - + code = translateExprList(pCxt, pSelect->pPartitionByList); } if (TSDB_CODE_SUCCESS == code) { @@ -3943,9 +3950,9 @@ static int32_t translateSetOperProject(STranslateContext* pCxt, SSetOperator* pS } snprintf(pRightExpr->aliasName, sizeof(pRightExpr->aliasName), "%s", pLeftExpr->aliasName); SNode* pProj = createSetOperProject(pSetOperator->stmtName, pLeft); - if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight) - && ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID - && 
((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { + if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight) && + ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID && + ((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { ((SColumnNode*)pProj)->colId = PRIMARYKEY_TIMESTAMP_COL_ID; } if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pSetOperator->pProjectionList, pProj)) { @@ -5725,7 +5732,6 @@ static int32_t translateRestoreDnode(STranslateContext* pCxt, SRestoreComponentN return buildCmdMsg(pCxt, TDMT_MND_RESTORE_DNODE, (FSerializeFunc)tSerializeSRestoreDnodeReq, &restoreReq); } - static int32_t getSmaIndexDstVgId(STranslateContext* pCxt, const char* pDbName, const char* pTableName, int32_t* pVgId) { SVgroupInfo vg = {0}; @@ -5853,7 +5859,7 @@ static int32_t checkCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pS } static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) { - int32_t code = checkCreateSmaIndex(pCxt, pStmt); + int32_t code = checkCreateSmaIndex(pCxt, pStmt); pStmt->pReq = taosMemoryCalloc(1, sizeof(SMCreateSmaReq)); if (pStmt->pReq == NULL) code = TSDB_CODE_OUT_OF_MEMORY; if (TSDB_CODE_SUCCESS == code) { @@ -5867,13 +5873,15 @@ int32_t createIntervalFromCreateSmaIndexStmt(SCreateIndexStmt* pStmt, SInterval* pInterval->interval = ((SValueNode*)pStmt->pOptions->pInterval)->datum.i; pInterval->intervalUnit = ((SValueNode*)pStmt->pOptions->pInterval)->unit; pInterval->offset = NULL != pStmt->pOptions->pOffset ? ((SValueNode*)pStmt->pOptions->pOffset)->datum.i : 0; - pInterval->sliding = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval; - pInterval->slidingUnit = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit; + pInterval->sliding = + NULL != pStmt->pOptions->pSliding ? 
((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval; + pInterval->slidingUnit = + NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit; pInterval->precision = pStmt->pOptions->tsPrecision; return TSDB_CODE_SUCCESS; } -int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void ** pResRow) { +int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) { int32_t code = TSDB_CODE_SUCCESS; SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pQuery->pRoot; int64_t lastTs = 0; @@ -6041,7 +6049,7 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS toName(pCxt->pParseCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name); tNameGetFullDbName(&name, pReq->subDbName); tNameExtractFullName(&name, pReq->subStbName); - if(pStmt->pQuery != NULL) { + if (pStmt->pQuery != NULL) { code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL); } } else if ('\0' != pStmt->subDbName[0]) { @@ -6096,11 +6104,12 @@ static EDealRes checkColumnTagsInCond(SNode* pNode, void* pContext) { addTagList(&pCxt->pTags, nodesCloneNode(pNode)); } } - + return DEAL_RES_CONTINUE; } -static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, SNodeList** ppProjection) { +static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, + SNodeList** ppProjection) { SBuildTopicContext colCxt = {.colExists = false, .colNotFound = false, .pMeta = pMeta, .pTags = NULL}; nodesWalkExprPostOrder(pStmt->pWhere, checkColumnTagsInCond, &colCxt); if (colCxt.colNotFound) { @@ -6110,18 +6119,18 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* nodesDestroyList(colCxt.pTags); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Columns are forbidden in where clause"); } - if (NULL == colCxt.pTags) { // put one column to 
select -// for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { - SSchema* column = &pMeta->schema[0]; - SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); - if (NULL == col) { - return TSDB_CODE_OUT_OF_MEMORY; - } - strcpy(col->colName, column->name); - strcpy(col->node.aliasName, col->colName); - strcpy(col->node.userAlias, col->colName); - addTagList(&colCxt.pTags, (SNode*)col); -// } + if (NULL == colCxt.pTags) { // put one column to select + // for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { + SSchema* column = &pMeta->schema[0]; + SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == col) { + return TSDB_CODE_OUT_OF_MEMORY; + } + strcpy(col->colName, column->name); + strcpy(col->node.aliasName, col->colName); + strcpy(col->node.userAlias, col->colName); + addTagList(&colCxt.pTags, (SNode*)col); + // } } *ppProjection = colCxt.pTags; @@ -6129,13 +6138,13 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* } static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt* pStmt, SNode** pSelect) { - SParseContext* pParCxt = pCxt->pParseCxt; - SRequestConnInfo connInfo = {.pTrans = pParCxt->pTransporter, - .requestId = pParCxt->requestId, + SParseContext* pParCxt = pCxt->pParseCxt; + SRequestConnInfo connInfo = {.pTrans = pParCxt->pTransporter, + .requestId = pParCxt->requestId, .requestObjRefId = pParCxt->requestRid, .mgmtEps = pParCxt->mgmtEpSet}; - SName name; - STableMeta* pMeta = NULL; + SName name; + STableMeta* pMeta = NULL; int32_t code = getTableMetaImpl(pCxt, toName(pParCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name), &pMeta); if (code) { taosMemoryFree(pMeta); @@ -6144,7 +6153,7 @@ static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt if (TSDB_SUPER_TABLE != pMeta->tableType) { taosMemoryFree(pMeta); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Only supertable table can be 
used"); - } + } SNodeList* pProjection = NULL; code = checkCollectTopicTags(pCxt, pStmt, pMeta, &pProjection); @@ -6542,7 +6551,8 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "SUBTABLE expression must be of VARCHAR type"); } - if (NULL != pSelect->pSubtable && 0 == LIST_LENGTH(pSelect->pPartitionByList) && subtableExprHasColumnOrPseudoColumn(pSelect->pSubtable)) { + if (NULL != pSelect->pSubtable && 0 == LIST_LENGTH(pSelect->pPartitionByList) && + subtableExprHasColumnOrPseudoColumn(pSelect->pSubtable)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "SUBTABLE expression must not has column when no partition by clause"); } @@ -6895,28 +6905,28 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta if (NULL == col) { return TSDB_CODE_OUT_OF_MEMORY; } - + strcpy(col->tableAlias, pTable); strcpy(col->colName, pMeta->schema[0].name); SNodeList* pParamterList = nodesMakeList(); if (NULL == pParamterList) { - nodesDestroyNode((SNode *)col); + nodesDestroyNode((SNode*)col); return TSDB_CODE_OUT_OF_MEMORY; } - - int32_t code = nodesListStrictAppend(pParamterList, (SNode *)col); + + int32_t code = nodesListStrictAppend(pParamterList, (SNode*)col); if (code) { - nodesDestroyNode((SNode *)col); + nodesDestroyNode((SNode*)col); nodesDestroyList(pParamterList); return code; } - + SNode* pFunc = (SNode*)createFunction("last", pParamterList); if (NULL == pFunc) { nodesDestroyList(pParamterList); return TSDB_CODE_OUT_OF_MEMORY; } - + SNodeList* pProjectionList = nodesMakeList(); if (NULL == pProjectionList) { nodesDestroyList(pParamterList); @@ -6928,8 +6938,8 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta nodesDestroyList(pProjectionList); return code; } - - code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt **)pQuery); + + 
code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt**)pQuery); if (code) { nodesDestroyList(pProjectionList); return code; @@ -6967,14 +6977,14 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt if (TSDB_CODE_SUCCESS == code && pStmt->pOptions->fillHistory) { SRealTableNode* pTable = (SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable); code = createLastTsSelectStmt(pTable->table.dbName, pTable->table.tableName, pTable->pMeta, &pStmt->pPrevQuery); -/* - if (TSDB_CODE_SUCCESS == code) { - STranslateContext cxt = {0}; - int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); - code = translateQuery(&cxt, pStmt->pPrevQuery); - destroyTranslateContext(&cxt); - } -*/ + /* + if (TSDB_CODE_SUCCESS == code) { + STranslateContext cxt = {0}; + int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); + code = translateQuery(&cxt, pStmt->pPrevQuery); + destroyTranslateContext(&cxt); + } + */ } taosMemoryFree(pMeta); return code; @@ -7069,7 +7079,7 @@ static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow)) { return code; } - + SIntervalWindowNode* pWindow = (SIntervalWindowNode*)pSelect->pWindow; pInterval->interval = ((SValueNode*)pWindow->pInterval)->datum.i; pInterval->intervalUnit = ((SValueNode*)pWindow->pInterval)->unit; @@ -7077,16 +7087,16 @@ static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* pInterval->sliding = (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->datum.i : pInterval->interval); pInterval->slidingUnit = (NULL != pWindow->pSliding ? 
((SValueNode*)pWindow->pSliding)->unit : pInterval->intervalUnit); - pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision; + pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision; return code; } int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) { SCreateStreamStmt* pStmt = (SCreateStreamStmt*)pQuery->pRoot; - STranslateContext cxt = {0}; - SInterval interval = {0}; - int64_t lastTs = 0; + STranslateContext cxt = {0}; + SInterval interval = {0}; + int64_t lastTs = 0; int32_t code = initTranslateContext(pParseCxt, NULL, &cxt); if (TSDB_CODE_SUCCESS == code) { @@ -7121,7 +7131,6 @@ int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void return code; } - static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pStmt) { SMDropStreamReq dropReq = {0}; SName name; @@ -7152,7 +7161,7 @@ static int32_t translateResumeStream(STranslateContext* pCxt, SResumeStreamStmt* static int32_t readFromFile(char* pName, int32_t* len, char** buf) { int64_t filesize = 0; - if (taosStatFile(pName, &filesize, NULL) < 0) { + if (taosStatFile(pName, &filesize, NULL, NULL) < 0) { return TAOS_SYSTEM_ERROR(errno); } @@ -7246,7 +7255,7 @@ static int32_t translateGrantTagCond(STranslateContext* pCxt, SGrantStmt* pStmt, } } - int32_t code = createRealTableForGrantTable(pStmt, &pTable); + int32_t code = createRealTableForGrantTable(pStmt, &pTable); if (TSDB_CODE_SUCCESS == code) { SName name; code = getTableMetaImpl(pCxt, toName(pCxt->pParseCxt->acctId, pTable->table.dbName, pTable->table.tableName, &name), @@ -7806,7 +7815,8 @@ static SNodeList* createProjectCols(int32_t ncols, const char* const pCols[]) { return pProjections; } -static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { +static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, + 
SSelectStmt** pStmt) { SSelectStmt* pSelect = (SSelectStmt*)nodesMakeNode(QUERY_NODE_SELECT_STMT); if (NULL == pSelect) { return TSDB_CODE_OUT_OF_MEMORY; @@ -7829,9 +7839,8 @@ static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, S return TSDB_CODE_SUCCESS; } - static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTable, int32_t numOfProjs, - const char* const pProjCol[], SSelectStmt** pStmt) { + const char* const pProjCol[], SSelectStmt** pStmt) { SNodeList* pProjectionList = NULL; if (numOfProjs >= 0) { pProjectionList = createProjectCols(numOfProjs, pProjCol); @@ -7843,13 +7852,15 @@ static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTabl return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt); } -static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { +static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt) { return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt); } static int32_t createSelectStmtForShow(ENodeType showType, SSelectStmt** pStmt) { const SSysTableShowAdapter* pShow = &sysTableShowAdapter[showType - SYSTABLE_SHOW_TYPE_OFFSET]; - return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, pStmt); + return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, + pStmt); } static int32_t createSelectStmtForShowTableDist(SShowTableDistributedStmt* pStmt, SSelectStmt** pOutput) { @@ -7987,8 +7998,8 @@ static int32_t createShowTableTagsProjections(SNodeList** pProjections, SNodeLis static int32_t rewriteShowStableTags(STranslateContext* pCxt, SQuery* pQuery) { SShowTableTagsStmt* pShow = (SShowTableTagsStmt*)pQuery->pRoot; SSelectStmt* pSelect = NULL; - int32_t code = 
createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal, - -1, NULL, &pSelect); + int32_t code = createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, + ((SValueNode*)pShow->pTbName)->literal, -1, NULL, &pSelect); if (TSDB_CODE_SUCCESS == code) { code = createShowTableTagsProjections(&pSelect->pProjectionList, &pShow->pTags); } diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c index bd15567c87..051106b99d 100644 --- a/source/libs/sync/src/syncRaftStore.c +++ b/source/libs/sync/src/syncRaftStore.c @@ -42,7 +42,7 @@ int32_t raftStoreReadFile(SSyncNode *pNode) { const char *file = pNode->raftStorePath; SRaftStore *pStore = &pNode->raftStore; - if (taosStatFile(file, NULL, NULL) < 0) { + if (taosStatFile(file, NULL, NULL, NULL) < 0) { sInfo("vgId:%d, raft store file:%s not exist, use default value", pNode->vgId, file); pStore->currentTerm = 0; pStore->voteFor.addr = 0; diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c index 01d23a7e96..2acdd975e5 100644 --- a/source/libs/wal/src/walMeta.c +++ b/source/libs/wal/src/walMeta.c @@ -53,7 +53,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal, int32_t fileIdx) { walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); TdFilePtr pFile = taosOpenFile(fnameStr, TD_FILE_READ | TD_FILE_WRITE); if (pFile == NULL) { @@ -304,7 +304,7 @@ int walRepairLogFileTs(SWal* pWal, bool* updateMeta) { walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); int32_t mtime = 0; - if (taosStatFile(fnameStr, NULL, &mtime) < 0) { + if (taosStatFile(fnameStr, NULL, &mtime, NULL) < 0) { terrno = TAOS_SYSTEM_ERROR(errno); wError("vgId:%d, failed to stat file due to %s, file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); return -1; @@ -353,7 +353,7 @@ int walTrimIdxFile(SWal* pWal, int32_t fileIdx) 
{ walBuildIdxName(pWal, pFileInfo->firstVer, fnameStr); int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); int64_t records = TMAX(0, pFileInfo->lastVer - pFileInfo->firstVer + 1); int64_t lastEndOffset = records * sizeof(SWalIdxEntry); @@ -436,7 +436,7 @@ int walCheckAndRepairMeta(SWal* pWal) { SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, fileIdx); walBuildLogName(pWal, pFileInfo->firstVer, fnameStr); - int32_t code = taosStatFile(fnameStr, &fileSize, NULL); + int32_t code = taosStatFile(fnameStr, &fileSize, NULL, NULL); if (code < 0) { terrno = TAOS_SYSTEM_ERROR(errno); wError("failed to stat file since %s. file:%s", terrstr(), fnameStr); @@ -522,7 +522,7 @@ int walCheckAndRepairIdxFile(SWal* pWal, int32_t fileIdx) { walBuildLogName(pWal, pFileInfo->firstVer, fLogNameStr); int64_t fileSize = 0; - if (taosStatFile(fnameStr, &fileSize, NULL) < 0 && errno != ENOENT) { + if (taosStatFile(fnameStr, &fileSize, NULL, NULL) < 0 && errno != ENOENT) { wError("vgId:%d, failed to stat file due to %s. 
file:%s", pWal->cfg.vgId, strerror(errno), fnameStr); terrno = TAOS_SYSTEM_ERROR(errno); return -1; @@ -935,7 +935,7 @@ int walLoadMeta(SWal* pWal) { walBuildMetaName(pWal, metaVer, fnameStr); // read metafile int64_t fileSize = 0; - taosStatFile(fnameStr, &fileSize, NULL); + taosStatFile(fnameStr, &fileSize, NULL, NULL); if (fileSize == 0) { (void)taosRemoveFile(fnameStr); wDebug("vgId:%d, wal find empty meta ver %d", pWal->cfg.vgId, metaVer); diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index dd670595f0..c4309b2c55 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -191,7 +191,7 @@ int32_t taosRenameFile(const char *oldName, const char *newName) { #endif } -int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { +int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime, int32_t *atime) { #ifdef WINDOWS struct _stati64 fileStat; int32_t code = _stati64(path, &fileStat); @@ -211,6 +211,10 @@ int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) { *mtime = fileStat.st_mtime; } + if (atime != NULL) { + *atime = fileStat.st_mtime; + } + return 0; } int32_t taosDevInoFile(TdFilePtr pFile, int64_t *stDev, int64_t *stIno) { @@ -540,7 +544,7 @@ int32_t taosFStatFile(TdFilePtr pFile, int64_t *size, int32_t *mtime) { #ifdef WINDOWS struct __stat64 fileStat; - int32_t code = _fstat64(pFile->fd, &fileStat); + int32_t code = _fstat64(pFile->fd, &fileStat); #else struct stat fileStat; int32_t code = fstat(pFile->fd, &fileStat); @@ -897,17 +901,17 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName) { goto cmp_end; } - dstFp = gzdopen(pFile->fd, "wb6f"); - if (dstFp == NULL) { - ret = -3; - taosCloseFile(&pFile); - goto cmp_end; - } + dstFp = gzdopen(pFile->fd, "wb6f"); + if (dstFp == NULL) { + ret = -3; + taosCloseFile(&pFile); + goto cmp_end; + } - while (!feof(pSrcFile->fp)) { - len = (int32_t)fread(data, 1, compressSize, pSrcFile->fp); - (void)gzwrite(dstFp, data, len); - } + while 
(!feof(pSrcFile->fp)) { + len = (int32_t)fread(data, 1, compressSize, pSrcFile->fp); + (void)gzwrite(dstFp, data, len); + } cmp_end: if (pSrcFile) { diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index de7ad848ed..4a15b5b976 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -17,8 +17,8 @@ #include "tlog.h" #include "os.h" #include "tconfig.h" -#include "tjson.h" #include "tglobal.h" +#include "tjson.h" #define LOG_MAX_LINE_SIZE (10024) #define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3) @@ -74,12 +74,12 @@ static SLogObj tsLogObj = {.fileNum = 1}; static int64_t tsAsyncLogLostLines = 0; static int32_t tsDaylightActive; /* Currently in daylight saving time. */ -bool tsLogEmbedded = 0; -bool tsAsyncLog = true; +bool tsLogEmbedded = 0; +bool tsAsyncLog = true; #ifdef ASSERT_NOT_CORE -bool tsAssert = false; +bool tsAssert = false; #else -bool tsAssert = true; +bool tsAssert = true; #endif int32_t tsNumOfLogLines = 10000000; int32_t tsLogKeepDays = 0; @@ -160,7 +160,7 @@ int32_t taosInitSlowLog() { tsLogObj.slowHandle = taosLogBuffNew(LOG_SLOW_BUF_SIZE); if (tsLogObj.slowHandle == NULL) return -1; - + taosUmaskFile(0); tsLogObj.slowHandle->pFile = taosOpenFile(fullName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); if (tsLogObj.slowHandle->pFile == NULL) { @@ -403,13 +403,13 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { strcpy(name, fn); strcat(name, ".0"); } - bool log0Exist = taosStatFile(name, NULL, &logstat0_mtime) >= 0; + bool log0Exist = taosStatFile(name, NULL, &logstat0_mtime, NULL) >= 0; if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { strcpy(name, fn); strcat(name, ".1"); } - bool log1Exist = taosStatFile(name, NULL, &logstat1_mtime) >= 0; + bool log1Exist = taosStatFile(name, NULL, &logstat1_mtime, NULL) >= 0; // if none of the log files exist, open 0, if both exists, open the old one if (!log0Exist && !log1Exist) { @@ -576,7 +576,7 @@ void taosPrintSlowLog(const char *format, ...) 
{ } else { taosWriteFile(tsLogObj.slowHandle->pFile, buffer, len); } - + taosMemoryFree(buffer); } @@ -769,12 +769,12 @@ static void taosWriteLog(SLogBuff *pLogBuf) { static void *taosAsyncOutputLog(void *param) { SLogBuff *pLogBuf = (SLogBuff *)tsLogObj.logHandle; SLogBuff *pSlowBuf = (SLogBuff *)tsLogObj.slowHandle; - + setThreadName("log"); int32_t count = 0; int32_t updateCron = 0; int32_t writeInterval = 0; - + while (1) { writeInterval = TMIN(pLogBuf->writeInterval, pSlowBuf->writeInterval); count += writeInterval; @@ -834,12 +834,12 @@ bool taosAssertDebug(bool condition, const char *file, int32_t line, const char return true; } -void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, void *sigInfo) { +void taosLogCrashInfo(char *nodeType, char *pMsg, int64_t msgLen, int signum, void *sigInfo) { const char *flags = "UTL FATAL "; ELogLevel level = DEBUG_FATAL; int32_t dflag = 255; - char filepath[PATH_MAX] = {0}; - TdFilePtr pFile = NULL; + char filepath[PATH_MAX] = {0}; + TdFilePtr pFile = NULL; if (pMsg && msgLen > 0) { snprintf(filepath, sizeof(filepath), "%s%s.%sCrashLog", tsLogDir, TD_DIRSEP, nodeType); @@ -856,16 +856,16 @@ void taosLogCrashInfo(char* nodeType, char* pMsg, int64_t msgLen, int signum, vo int64_t writeSize = taosWriteFile(pFile, &msgLen, sizeof(msgLen)); if (sizeof(msgLen) != writeSize) { taosUnLockFile(pFile); - taosPrintLog(flags, level, dflag, "failed to write len to file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", - filepath, pFile, writeSize, sizeof(msgLen), terrstr()); + taosPrintLog(flags, level, dflag, "failed to write len to file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", + filepath, pFile, writeSize, sizeof(msgLen), terrstr()); goto _return; } writeSize = taosWriteFile(pFile, pMsg, msgLen); if (msgLen != writeSize) { taosUnLockFile(pFile); - taosPrintLog(flags, level, dflag, "failed to write file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", - filepath, pFile, writeSize, msgLen, terrstr()); + 
taosPrintLog(flags, level, dflag, "failed to write file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", + filepath, pFile, writeSize, msgLen, terrstr()); goto _return; } @@ -883,7 +883,7 @@ _return: taosPrintTrace(flags, level, dflag, 4); #elif !defined(WINDOWS) taosPrintLog(flags, level, dflag, "sender PID:%d cmdline:%s", ((siginfo_t *)sigInfo)->si_pid, - taosGetCmdlineByPID(((siginfo_t *)sigInfo)->si_pid)); + taosGetCmdlineByPID(((siginfo_t *)sigInfo)->si_pid)); taosPrintTrace(flags, level, dflag, 3); #else taosPrintTrace(flags, level, dflag, 8); @@ -892,17 +892,17 @@ _return: taosMemoryFree(pMsg); } -void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* pFd) { +void taosReadCrashInfo(char *filepath, char **pMsg, int64_t *pMsgLen, TdFilePtr *pFd) { const char *flags = "UTL FATAL "; ELogLevel level = DEBUG_FATAL; int32_t dflag = 255; TdFilePtr pFile = NULL; bool truncateFile = false; - char* buf = NULL; + char *buf = NULL; if (NULL == *pFd) { int64_t filesize = 0; - if (taosStatFile(filepath, &filesize, NULL) < 0) { + if (taosStatFile(filepath, &filesize, NULL, NULL) < 0) { if (ENOENT == errno) { return; } @@ -916,7 +916,7 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* return; } - pFile = taosOpenFile(filepath, TD_FILE_READ|TD_FILE_WRITE); + pFile = taosOpenFile(filepath, TD_FILE_READ | TD_FILE_WRITE); if (pFile == NULL) { if (ENOENT == errno) { return; @@ -926,7 +926,7 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* taosPrintLog(flags, level, dflag, "failed to open file:%s since %s", filepath, terrstr()); return; } - + taosLockFile(pFile); } else { pFile = *pFd; @@ -937,8 +937,8 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* if (sizeof(msgLen) != readSize) { truncateFile = true; if (readSize < 0) { - taosPrintLog(flags, level, dflag, "failed to read len from file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", - filepath, pFile, 
readSize, sizeof(msgLen), terrstr()); + taosPrintLog(flags, level, dflag, "failed to read len from file:%s,%p wlen:%" PRId64 " tlen:%lu since %s", + filepath, pFile, readSize, sizeof(msgLen), terrstr()); } goto _return; } @@ -948,12 +948,12 @@ void taosReadCrashInfo(char* filepath, char** pMsg, int64_t* pMsgLen, TdFilePtr* taosPrintLog(flags, level, dflag, "failed to malloc buf, size:%" PRId64, msgLen); goto _return; } - + readSize = taosReadFile(pFile, buf, msgLen); if (msgLen != readSize) { truncateFile = true; - taosPrintLog(flags, level, dflag, "failed to read file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", - filepath, pFile, readSize, msgLen, terrstr()); + taosPrintLog(flags, level, dflag, "failed to read file:%s,%p wlen:%" PRId64 " tlen:%" PRId64 " since %s", filepath, + pFile, readSize, msgLen, terrstr()); goto _return; } @@ -981,7 +981,7 @@ void taosReleaseCrashLogFile(TdFilePtr pFile, bool truncateFile) { if (truncateFile) { taosFtruncateFile(pFile, 0); } - + taosUnLockFile(pFile); taosCloseFile(&pFile); } diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index e9dd067ac4..860622ea18 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -18,9 +18,9 @@ #define _GNU_SOURCE #define _XOPEN_SOURCE #define _DEFAULT_SOURCE -#include "shellInt.h" -#include "shellAuto.h" #include "geosWrapper.h" +#include "shellAuto.h" +#include "shellInt.h" static bool shellIsEmptyCommand(const char *cmd); static int32_t shellRunSingleCommand(char *command); @@ -41,9 +41,9 @@ static bool shellIsCommentLine(char *line); static void shellSourceFile(const char *file); static void shellGetGrantInfo(); -static void shellCleanup(void *arg); -static void *shellCancelHandler(void *arg); -static void *shellThreadLoop(void *arg); +static void shellCleanup(void *arg); +static void *shellCancelHandler(void *arg); +static void *shellThreadLoop(void *arg); bool shellIsEmptyCommand(const char *cmd) { for (char c = *cmd++; c != 0; c = 
*cmd++) { @@ -66,7 +66,7 @@ int32_t shellRunSingleCommand(char *command) { if (shellRegexMatch(command, "^[\t ]*clear[ \t;]*$", REG_EXTENDED | REG_ICASE)) { #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-result" - system("clear"); + system("clear"); #pragma GCC diagnostic pop return 0; } @@ -142,8 +142,8 @@ int32_t shellRunCommand(char *command, bool recordHistory) { return 0; } - // add help or help; - if(strncasecmp(command, "help;", 5) == 0) { + // add help or help; + if (strncasecmp(command, "help;", 5) == 0) { showHelp(); return 0; } @@ -223,14 +223,14 @@ void shellRunSingleCommandImp(char *command) { } // pre string - char * pre = "Query OK"; + char *pre = "Query OK"; if (shellRegexMatch(command, "^\\s*delete\\s*from\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Delete OK"; - } else if(shellRegexMatch(command, "^\\s*insert\\s*into\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*insert\\s*into\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Insert OK"; - } else if(shellRegexMatch(command, "^\\s*create\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*create\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Create OK"; - } else if(shellRegexMatch(command, "^\\s*drop\\s*.*", REG_EXTENDED | REG_ICASE)) { + } else if (shellRegexMatch(command, "^\\s*drop\\s*.*", REG_EXTENDED | REG_ICASE)) { pre = "Drop OK"; } @@ -295,7 +295,7 @@ char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) { if (taosLocalTime(&tt, &ptm, buf) == NULL) { return buf; } - size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm); + size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm); if (precision == TSDB_TIME_PRECISION_NANO) { sprintf(buf + pos, ".%09d", ms); @@ -387,22 +387,20 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i break; case TSDB_DATA_TYPE_BINARY: case TSDB_DATA_TYPE_NCHAR: - case TSDB_DATA_TYPE_JSON: - { - int32_t bufIndex = 0; - for (int32_t i = 0; 
i < length; i++) { + case TSDB_DATA_TYPE_JSON: { + int32_t bufIndex = 0; + for (int32_t i = 0; i < length; i++) { + buf[bufIndex] = val[i]; + bufIndex++; + if (val[i] == '\"') { buf[bufIndex] = val[i]; bufIndex++; - if (val[i] == '\"') { - buf[bufIndex] = val[i]; - bufIndex++; - } } - buf[bufIndex] = 0; - - taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); } - break; + buf[bufIndex] = 0; + + taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr); + } break; case TSDB_DATA_TYPE_GEOMETRY: shellDumpHexValue(buf, val, length); taosFprintfFile(pFile, "%s", buf); @@ -535,12 +533,10 @@ void shellPrintString(const char *str, int32_t width) { if (width == 0) { printf("%s", str); - } - else if (len > width) { + } else if (len > width) { if (width <= 3) { printf("%.*s.", width - 1, str); - } - else { + } else { printf("%.*s...", width - 3, str); } } else { @@ -549,7 +545,7 @@ void shellPrintString(const char *str, int32_t width) { } void shellPrintGeometry(const unsigned char *val, int32_t length, int32_t width) { - if (length == 0) { //empty value + if (length == 0) { // empty value shellPrintString("", width); return; } @@ -565,7 +561,7 @@ void shellPrintGeometry(const unsigned char *val, int32_t length, int32_t width) char *outputWKT = NULL; code = doAsText(val, length, &outputWKT); if (code != TSDB_CODE_SUCCESS) { - shellPrintString(getThreadLocalGeosCtx()->errMsg, width); //should NOT happen + shellPrintString(getThreadLocalGeosCtx()->errMsg, width); // should NOT happen return; } @@ -612,27 +608,26 @@ void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t break; case TSDB_DATA_TYPE_FLOAT: if (tsEnableScience) { - printf("%*.7e",width,GET_FLOAT_VAL(val)); + printf("%*.7e", width, GET_FLOAT_VAL(val)); } else { n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.7f", width, GET_FLOAT_VAL(val)); if (n > SHELL_FLOAT_WIDTH) { - - printf("%*.7e", width,GET_FLOAT_VAL(val)); + printf("%*.7e", width, GET_FLOAT_VAL(val)); } else { - 
printf("%s", buf); + printf("%s", buf); } } break; case TSDB_DATA_TYPE_DOUBLE: if (tsEnableScience) { - snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15e", width,GET_DOUBLE_VAL(val)); + snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15e", width, GET_DOUBLE_VAL(val)); printf("%s", buf); } else { n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.15f", width, GET_DOUBLE_VAL(val)); if (n > SHELL_DOUBLE_WIDTH) { - printf("%*.15e", width, GET_DOUBLE_VAL(val)); + printf("%*.15e", width, GET_DOUBLE_VAL(val)); } else { - printf("%*s", width,buf); + printf("%*s", width, buf); } } break; @@ -905,7 +900,7 @@ void shellReadHistory() { TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_READ | TD_FILE_STREAM); if (pFile == NULL) return; - char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); + char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); int32_t read_size = 0; while ((read_size = taosGetsFile(pFile, TSDB_MAX_ALLOWED_SQL_LEN, line)) != -1) { line[read_size - 1] = '\0'; @@ -922,8 +917,8 @@ void shellReadHistory() { taosMemoryFreeClear(line); taosCloseFile(&pFile); int64_t file_size; - if (taosStatFile(pHistory->file, &file_size, NULL) == 0 && file_size > SHELL_MAX_COMMAND_SIZE) { - TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_TRUNC); + if (taosStatFile(pHistory->file, &file_size, NULL, NULL) == 0 && file_size > SHELL_MAX_COMMAND_SIZE) { + TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_TRUNC); if (pFile == NULL) return; int32_t endIndex = pHistory->hstart; if (endIndex != 0) { @@ -945,7 +940,7 @@ void shellReadHistory() { void shellWriteHistory() { SShellHistory *pHistory = &shell.history; if (pHistory->hend == pHistory->hstart) return; - TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | TD_FILE_APPEND); + TdFilePtr pFile = taosOpenFile(pHistory->file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_STREAM | 
TD_FILE_APPEND); if (pFile == NULL) return; for (int32_t i = pHistory->hstart; i != pHistory->hend;) { @@ -991,7 +986,7 @@ void shellSourceFile(const char *file) { tstrncpy(fullname, file, PATH_MAX); } - sprintf(sourceFileCommand, "source %s;",fullname); + sprintf(sourceFileCommand, "source %s;", fullname); shellRecordCommandToHistory(sourceFileCommand); TdFilePtr pFile = taosOpenFile(fullname, TD_FILE_READ | TD_FILE_STREAM); @@ -1001,7 +996,7 @@ void shellSourceFile(const char *file) { return; } - char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); + char *line = taosMemoryMalloc(TSDB_MAX_ALLOWED_SQL_LEN + 1); while ((read_len = taosGetsFile(pFile, TSDB_MAX_ALLOWED_SQL_LEN, line)) != -1) { if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue; line[--read_len] = '\0'; @@ -1044,7 +1039,8 @@ void shellGetGrantInfo() { int32_t code = taos_errno(tres); if (code != TSDB_CODE_SUCCESS) { - if (code != TSDB_CODE_OPS_NOT_SUPPORT && code != TSDB_CODE_MND_NO_RIGHTS && code != TSDB_CODE_PAR_PERMISSION_DENIED) { + if (code != TSDB_CODE_OPS_NOT_SUPPORT && code != TSDB_CODE_MND_NO_RIGHTS && + code != TSDB_CODE_PAR_PERMISSION_DENIED) { fprintf(stderr, "Failed to check Server Edition, Reason:0x%04x:%s\r\n\r\n", code, taos_errstr(tres)); } return; @@ -1080,7 +1076,8 @@ void shellGetGrantInfo() { } else if (strcmp(expiretime, "unlimited") == 0) { fprintf(stdout, "Server is Enterprise %s Edition, %s and will never expire.\r\n", serverVersion, sinfo); } else { - fprintf(stdout, "Server is Enterprise %s Edition, %s and will expire at %s.\r\n", serverVersion, sinfo, expiretime); + fprintf(stdout, "Server is Enterprise %s Edition, %s and will expire at %s.\r\n", serverVersion, sinfo, + expiretime); } taos_free_result(tres); @@ -1123,9 +1120,9 @@ void *shellCancelHandler(void *arg) { #ifdef WEBSOCKET } #endif - #ifdef WINDOWS +#ifdef WINDOWS printf("\n%s", shell.info.promptHeader); - #endif +#endif } return NULL; @@ -1165,8 +1162,7 @@ void *shellThreadLoop(void *arg) { } int32_t 
shellExecute() { - printf(shell.info.clientVersion, shell.info.cusName, - taos_get_client_info(), shell.info.cusName); + printf(shell.info.clientVersion, shell.info.cusName, taos_get_client_info(), shell.info.cusName); fflush(stdout); SShellArgs *pArgs = &shell.args; @@ -1233,13 +1229,13 @@ int32_t shellExecute() { taosSetSignal(SIGTERM, shellQueryInterruptHandler); taosSetSignal(SIGHUP, shellQueryInterruptHandler); taosSetSignal(SIGINT, shellQueryInterruptHandler); - + #ifdef WEBSOCKET if (!shell.args.restful && !shell.args.cloud) { #endif #ifndef WINDOWS printfIntroduction(); -#endif +#endif shellGetGrantInfo(); #ifdef WEBSOCKET } diff --git a/utils/test/c/tmqDemo.c b/utils/test/c/tmqDemo.c index ce069c2b05..64f536433e 100644 --- a/utils/test/c/tmqDemo.c +++ b/utils/test/c/tmqDemo.c @@ -221,7 +221,7 @@ int64_t getDirectorySize(char* dir) { totalSize += subDirSize; } else if (0 == strcmp(strchr(fileName, '.'), ".log")) { // only calc .log file size, and not include .idx file int64_t file_size = 0; - taosStatFile(subdir, &file_size, NULL); + taosStatFile(subdir, &file_size, NULL, NULL); totalSize += file_size; } } @@ -702,4 +702,3 @@ int main(int32_t argc, char* argv[]) { taosCloseFile(&g_fp); return 0; } - From 9e4da6c089b5db3f08716961983053476c207206 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 7 Aug 2023 17:14:58 +0800 Subject: [PATCH 010/147] s3/config: parsing s3 configuration --- source/common/src/tglobal.c | 41 +++++++++++++++++++++++---- source/dnode/vnode/src/vnd/vnodeCos.c | 8 +++--- 2 files changed, 39 insertions(+), 10 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 1f6d0800a5..fbc98715f0 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -236,8 +236,9 @@ bool tsFilterScalarMode = false; int32_t tsKeepTimeOffset = 0; // latency of data migration char tsS3Endpoint[TSDB_FQDN_LEN] = ""; -char tsS3AcessKeyId[TSDB_FQDN_LEN] = ""; -char tsS3AcessKeySecret[TSDB_FQDN_LEN] = ""; +char 
tsS3AccessKey[TSDB_FQDN_LEN] = ""; +char tsS3AccessKeyId[TSDB_FQDN_LEN] = ""; +char tsS3AccessKeySecret[TSDB_FQDN_LEN] = ""; char tsS3BucketName[TSDB_FQDN_LEN] = ""; char tsS3AppId[TSDB_FQDN_LEN] = ""; int8_t tsS3Enabled = false; @@ -263,6 +264,35 @@ int32_t taosSetTfsCfg(SConfig *pCfg) { int32_t taosSetTfsCfg(SConfig *pCfg); #endif +int32_t taosSetS3Cfg(SConfig *pCfg) { + tstrncpy(tsS3AccessKey, cfgGetItem(pCfg, "s3Accesskey")->str, TSDB_FQDN_LEN); + char *colon = strchr(tsS3AccessKey, ':'); + if (!colon) { + uError("invalid access key:%s", tsS3AccessKey); + return -1; + } + *colon = '\0'; + tstrncpy(tsS3AccessKeyId, tsS3AccessKey, TSDB_FQDN_LEN); + tstrncpy(tsS3AccessKeySecret, colon + 1, TSDB_FQDN_LEN); + tstrncpy(tsS3Endpoint, cfgGetItem(pCfg, "s3Endpoint")->str, TSDB_FQDN_LEN); + tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); + char *cos = strstr(tsS3Endpoint, "cos."); + if (cos) { + char *appid = strrchr(tsS3BucketName, '-'); + if (!appid) { + uError("failed to locate appid in bucket:%s", tsS3BucketName); + return -1; + } else { + tstrncpy(tsS3AppId, appid + 1, TSDB_FQDN_LEN); + } + } + if (tsS3BucketName[0] != '<' && tsDiskCfgNum > 1) { + tsS3Enabled = true; + } + + return 0; +} + struct SConfig *taosGetCfg() { return tsCfg; } @@ -582,6 +612,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "pqSortMemThreshold", tsPQSortMemThreshold, 1, 10240, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddString(pCfg, "s3Accesskey", tsS3AccessKey, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddString(pCfg, "s3Endpoint", tsS3Endpoint, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddString(pCfg, "s3BucketName", tsS3BucketName, CFG_SCOPE_SERVER) != 0) return -1; GRANT_CFG_ADD; @@ -972,8 +1004,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsMaxStreamBackendCache = cfgGetItem(pCfg, 
"maxStreamBackendCache")->i32; tsPQSortMemThreshold = cfgGetItem(pCfg, "pqSortMemThreshold")->i32; - tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); - GRANT_CFG_GET; return 0; } @@ -1298,8 +1328,6 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { taosGetFqdnPortFromEp(strlen(pFirstEpItem->str) == 0 ? defaultFirstEp : pFirstEpItem->str, &firstEp); snprintf(tsFirst, sizeof(tsFirst), "%s:%u", firstEp.fqdn, firstEp.port); cfgSetItem(pCfg, "firstEp", tsFirst, pFirstEpItem->stype); - } else if (strcasecmp("s3BucketName", name) == 0) { - tstrncpy(tsS3BucketName, cfgGetItem(pCfg, "s3BucketName")->str, TSDB_FQDN_LEN); } else if (strcasecmp("sDebugFlag", name) == 0) { sDebugFlag = cfgGetItem(pCfg, "sDebugFlag")->i32; } else if (strcasecmp("smaDebugFlag", name) == 0) { @@ -1498,6 +1526,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile if (taosSetServerCfg(tsCfg)) return -1; if (taosSetReleaseCfg(tsCfg)) return -1; if (taosSetTfsCfg(tsCfg) != 0) return -1; + if (taosSetS3Cfg(tsCfg) != 0) return -1; } taosSetSystemCfg(tsCfg); diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index bac38f7c35..a40e046972 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -7,8 +7,8 @@ #include "cos_log.h" extern char tsS3Endpoint[]; -extern char tsS3AcessKeyId[]; -extern char tsS3AcessKeySecret[]; +extern char tsS3AccessKeyId[]; +extern char tsS3AccessKeySecret[]; extern char tsS3BucketName[]; extern char tsS3AppId[]; @@ -41,8 +41,8 @@ static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) { cos_config_t *config = options->config; cos_str_set(&config->endpoint, tsS3Endpoint); - cos_str_set(&config->access_key_id, tsS3AcessKeyId); - cos_str_set(&config->access_key_secret, tsS3AcessKeySecret); + cos_str_set(&config->access_key_id, tsS3AccessKeyId); + cos_str_set(&config->access_key_secret, tsS3AccessKeySecret); 
cos_str_set(&config->appid, tsS3AppId); config->is_cname = is_cname; From 1290f529daf34cb3cfd6fa3cff92b43e94694a10 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 7 Aug 2023 17:17:43 +0800 Subject: [PATCH 011/147] cos/example: turn head object on --- contrib/test/cos/main.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/contrib/test/cos/main.c b/contrib/test/cos/main.c index faaceee2e3..7e5e7c8c8b 100644 --- a/contrib/test/cos/main.c +++ b/contrib/test/cos/main.c @@ -530,7 +530,12 @@ void test_head_object() { s = cos_head_object(options, &bucket, &object, NULL, &resp_headers); print_headers(resp_headers); if (cos_status_is_ok(s)) { - printf("head object succeeded\n"); + long size = 0; + char *content_length_str = (char *)apr_table_get(resp_headers, COS_CONTENT_LENGTH); + if (content_length_str != NULL) { + size = atol(content_length_str); + } + printf("head object succeeded: %ld\n", size); } else { printf("head object failed\n"); } @@ -3045,7 +3050,7 @@ int main(int argc, char *argv[]) { // test_object(); // test_put_object_with_limit(); // test_get_object_with_limit(); - // test_head_object(); + test_head_object(); // test_gen_object_url(); // test_list_objects(); // test_list_directory(); From 864d07e1bcf1168c4eb728fdb9ef3bb2f25f5ff2 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 7 Aug 2023 19:39:06 +0800 Subject: [PATCH 012/147] feat:[TD-24559]support geomety type in schemaless --- source/client/CMakeLists.txt | 4 +-- source/client/inc/clientSml.h | 1 + source/client/src/clientSml.c | 1 + source/client/src/clientSmlLine.c | 39 ++++++++++++++++++---- source/client/test/CMakeLists.txt | 2 +- source/dnode/mnode/impl/src/mndSubscribe.c | 2 +- source/libs/parser/src/parInsertSml.c | 14 ++++++-- source/libs/parser/src/parInsertUtil.c | 2 +- 8 files changed, 50 insertions(+), 15 deletions(-) diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt index d0cfd38fb9..e45ee9c932 100644 --- 
a/source/client/CMakeLists.txt +++ b/source/client/CMakeLists.txt @@ -16,7 +16,7 @@ target_include_directories( target_link_libraries( taos INTERFACE api - PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom + PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom geometry ) if(TD_DARWIN_ARM64) @@ -57,7 +57,7 @@ target_include_directories( target_link_libraries( taos_static INTERFACE api - PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom + PRIVATE os util common transport nodes parser command planner catalog scheduler function qcom geometry ) if(${BUILD_TEST}) diff --git a/source/client/inc/clientSml.h b/source/client/inc/clientSml.h index 040064560c..11376052b6 100644 --- a/source/client/inc/clientSml.h +++ b/source/client/inc/clientSml.h @@ -33,6 +33,7 @@ extern "C" { #include "ttime.h" #include "ttypes.h" #include "cJSON.h" +#include "geosWrapper.h" #if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__) # define expect(expr,value) (__builtin_expect ((expr),(value)) ) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index ffff3df5d0..65175110cf 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -1191,6 +1191,7 @@ void freeSSmlKv(void *data) { SSmlKv *kv = (SSmlKv *)data; if (kv->keyEscaped) taosMemoryFree((void *)(kv->key)); if (kv->valueEscaped) taosMemoryFree((void *)(kv->value)); + if (kv->type == TSDB_DATA_TYPE_GEOMETRY) geosFreeBuffer((void *)(kv->value)); } void smlDestroyInfo(SSmlHandle *info) { diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index 1ee2cfbedf..9c0b2d7688 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -102,6 +102,30 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { return TSDB_CODE_TSC_INVALID_VALUE; } + if 
(pVal->value[0] == 'g' || pVal->value[0] == 'G') { // geometry + if (pVal->value[1] == '"' && pVal->value[pVal->length - 1] == '"' && pVal->length >= sizeof("POINT")+3) { + int32_t code = initCtxGeomFromText(); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + char* tmp = taosMemoryCalloc(pVal->length, 1); + memcmp(tmp, pVal->value + 2, pVal->length - 3); + code = doGeomFromText(tmp, (unsigned char **)&pVal->value, &pVal->length); + taosMemoryFree(tmp); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + pVal->type = TSDB_DATA_TYPE_GEOMETRY; + if (pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { + geosFreeBuffer((void*)(pVal->value)); + return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN; + } + return TSDB_CODE_SUCCESS; + } + return TSDB_CODE_TSC_INVALID_VALUE; + } + if (pVal->value[0] == 't' || pVal->value[0] == 'T') { if (pVal->length == 1 || (pVal->length == 4 && (pVal->value[1] == 'r' || pVal->value[1] == 'R') && @@ -390,7 +414,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin SSmlKv kv = {.key = tag->name, .keyLen = strlen(tag->name), .type = tag->type}; if (tag->type == TSDB_DATA_TYPE_NCHAR) { kv.length = (tag->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE; - } else if (tag->type == TSDB_DATA_TYPE_BINARY) { + } else if (tag->type == TSDB_DATA_TYPE_BINARY || tag->type == TSDB_DATA_TYPE_GEOMETRY) { kv.length = tag->bytes - VARSTR_HEADER_SIZE; } taosArrayPush((*tmp)->cols, &kv); @@ -663,14 +687,15 @@ int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLine if (info->dataFormat) { uDebug("SML:0x%" PRIx64 " smlParseInfluxString format true, ts:%" PRId64, info->id, ts); ret = smlBuildCol(info->currTableDataCtx, info->currSTableMeta->schema, &kv, 0); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - ret = smlBuildRow(info->currTableDataCtx); - if (ret != TSDB_CODE_SUCCESS) { - return ret; + if (ret == TSDB_CODE_SUCCESS) { + ret = smlBuildRow(info->currTableDataCtx); } + 
clearColValArray(info->currTableDataCtx->pValues); + if (unlikely(ret != TSDB_CODE_SUCCESS)) { + smlBuildInvalidDataMsg(&info->msgBuf, "smlBuildCol error", NULL); + return ret; + } } else { uDebug("SML:0x%" PRIx64 " smlParseInfluxString format false, ts:%" PRId64, info->id, ts); taosArraySet(elements->colArray, 0, &kv); diff --git a/source/client/test/CMakeLists.txt b/source/client/test/CMakeLists.txt index 34c377c6ea..91f0d1eef8 100644 --- a/source/client/test/CMakeLists.txt +++ b/source/client/test/CMakeLists.txt @@ -20,7 +20,7 @@ TARGET_LINK_LIBRARIES( ADD_EXECUTABLE(smlTest smlTest.cpp) TARGET_LINK_LIBRARIES( smlTest - PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom + PUBLIC os util common transport parser catalog scheduler function gtest taos_static qcom geometry ) TARGET_INCLUDE_DIRECTORIES( diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c index 6f50b9ff9f..166e67b15b 100644 --- a/source/dnode/mnode/impl/src/mndSubscribe.c +++ b/source/dnode/mnode/impl/src/mndSubscribe.c @@ -489,7 +489,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR SMqVgEp *pVgEp = taosArrayGetP(pConsumerEpNew->vgs, i); if(pVgEp->vgId == d1->vgId){ jump = true; - mInfo("pSub->offsetRows jump, because consumer id:%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId); + mInfo("pSub->offsetRows jump, because consumer id:0x%"PRIx64 " and vgId:%d not change", pConsumerEp->consumerId, pVgEp->vgId); break; } } diff --git a/source/libs/parser/src/parInsertSml.c b/source/libs/parser/src/parInsertSml.c index 78b05b6df5..577b8961a7 100644 --- a/source/libs/parser/src/parInsertSml.c +++ b/source/libs/parser/src/parInsertSml.c @@ -22,7 +22,7 @@ static void clearColValArray(SArray* pCols) { int32_t num = taosArrayGetSize(pCols); for (int32_t i = 0; i < num; ++i) { SColVal* pCol = taosArrayGet(pCols, i); - if (TSDB_DATA_TYPE_NCHAR == pCol->type) { + if 
(TSDB_DATA_TYPE_NCHAR == pCol->type || TSDB_DATA_TYPE_GEOMETRY == pCol->type) { taosMemoryFreeClear(pCol->value.pData); } pCol->flag = CV_FLAG_NONE; @@ -237,9 +237,13 @@ int32_t smlBuildCol(STableDataCxt* pTableCxt, SSchema* schema, void* data, int32 } pVal->value.pData = pUcs4; pVal->value.nData = len; - } else if (kv->type == TSDB_DATA_TYPE_BINARY || kv->type == TSDB_DATA_TYPE_GEOMETRY) { + } else if (kv->type == TSDB_DATA_TYPE_BINARY) { pVal->value.nData = kv->length; pVal->value.pData = (uint8_t*)kv->value; + } else if (kv->type == TSDB_DATA_TYPE_GEOMETRY) { + pVal->value.nData = kv->length; + pVal->value.pData = taosMemoryMalloc(kv->length); + memcpy(pVal->value.pData, (uint8_t*)kv->value, kv->length); } else { memcpy(&pVal->value.val, &(kv->value), kv->length); } @@ -364,9 +368,13 @@ int32_t smlBindData(SQuery* query, bool dataFormat, SArray* tags, SArray* colsSc } pVal->value.pData = pUcs4; pVal->value.nData = len; - } else if (kv->type == TSDB_DATA_TYPE_BINARY || kv->type == TSDB_DATA_TYPE_GEOMETRY) { + } else if (kv->type == TSDB_DATA_TYPE_BINARY) { pVal->value.nData = kv->length; pVal->value.pData = (uint8_t*)kv->value; + } else if (kv->type == TSDB_DATA_TYPE_GEOMETRY) { + pVal->value.nData = kv->length; + pVal->value.pData = taosMemoryMalloc(kv->length); + memcpy(pVal->value.pData, (uint8_t*)kv->value, kv->length); } else { memcpy(&pVal->value.val, &(kv->value), kv->length); } diff --git a/source/libs/parser/src/parInsertUtil.c b/source/libs/parser/src/parInsertUtil.c index de7d154db6..33699ed857 100644 --- a/source/libs/parser/src/parInsertUtil.c +++ b/source/libs/parser/src/parInsertUtil.c @@ -333,7 +333,7 @@ int32_t insGetTableDataCxt(SHashObj* pHash, void* id, int32_t idLen, STableMeta* static void destroyColVal(void* p) { SColVal* pVal = p; - if (TSDB_DATA_TYPE_NCHAR == pVal->type) { + if (TSDB_DATA_TYPE_NCHAR == pVal->type || TSDB_DATA_TYPE_GEOMETRY == pVal->type) { taosMemoryFree(pVal->value.pData); } } From 
d0335bec974ee1516d74a6bafd196b0fea2859bd Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 8 Aug 2023 09:49:31 +0800 Subject: [PATCH 013/147] feat:[TD-24559]support geomety type in schemaless --- source/client/src/clientSmlLine.c | 2 +- utils/test/c/sml_test.c | 33 +++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index 9c0b2d7688..d6f405e69d 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -109,7 +109,7 @@ int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg) { return code; } char* tmp = taosMemoryCalloc(pVal->length, 1); - memcmp(tmp, pVal->value + 2, pVal->length - 3); + memcpy(tmp, pVal->value + 2, pVal->length - 3); code = doGeomFromText(tmp, (unsigned char **)&pVal->value, &pVal->length); taosMemoryFree(tmp); if (code != TSDB_CODE_SUCCESS) { diff --git a/utils/test/c/sml_test.c b/utils/test/c/sml_test.c index e4ed6037a3..237bfc5092 100644 --- a/utils/test/c/sml_test.c +++ b/utils/test/c/sml_test.c @@ -1552,12 +1552,45 @@ int sml_ts3724_Test() { return code; } +int sml_td24559_Test() { + TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0); + + TAOS_RES *pRes = taos_query(taos, "drop database if exists td24559"); + taos_free_result(pRes); + + pRes = taos_query(taos, "create database if not exists td24559"); + taos_free_result(pRes); + + const char *sql[] = { + "stb,t1=1 f1=283i32,f2=g\"Point(4.343 89.342)\" 1632299372000", + "stb,t1=1 f2=G\"Point(4.343 89.342)\",f1=106i32 1632299373000", + "stb,t2=1 f2=G\"Point(4.343 89.342)\",f1=106i32 1632299374000", + "stb,t1=1 f1=106i32,f2=G\"GEOMETRYCOLLECTION (MULTIPOINT((0 0), (1 1)), POINT(3 4), LINESTRING(2 3, 3 4))\" 1632299378000", + }; + + pRes = taos_query(taos, "use td24559"); + taos_free_result(pRes); + + pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, + TSDB_SML_TIMESTAMP_MILLI_SECONDS); 
+ + int code = taos_errno(pRes); + printf("%s result0:%s\n", __FUNCTION__, taos_errstr(pRes)); + taos_free_result(pRes); + + taos_close(taos); + + return code; +} + int main(int argc, char *argv[]) { if (argc == 2) { taos_options(TSDB_OPTION_CONFIGDIR, argv[1]); } int ret = 0; + ret = sml_td24559_Test(); + ASSERT(!ret); ret = sml_td24070_Test(); ASSERT(!ret); ret = sml_td23881_Test(); From 607132c18a4e00607a4fe64c2365a4e68c222d0e Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 8 Aug 2023 10:33:55 +0800 Subject: [PATCH 014/147] feat:[TD-24559]support geomety type in schemaless --- source/client/inc/clientSml.h | 2 +- source/client/src/clientSmlLine.c | 6 +++--- tests/system-test/2-query/sml.py | 5 +++++ 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/source/client/inc/clientSml.h b/source/client/inc/clientSml.h index 11376052b6..1839c14894 100644 --- a/source/client/inc/clientSml.h +++ b/source/client/inc/clientSml.h @@ -193,7 +193,7 @@ typedef struct { // SArray *preLineTagKV; SArray *maxTagKVs; - SArray *masColKVs; + SArray *maxColKVs; SSmlLineInfo preLine; STableMeta *currSTableMeta; diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c index d6f405e69d..558c5f4ddb 100644 --- a/source/client/src/clientSmlLine.c +++ b/source/client/src/clientSmlLine.c @@ -421,7 +421,7 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin } } info->currSTableMeta = (*tmp)->tableMeta; - info->masColKVs = (*tmp)->cols; + info->maxColKVs = (*tmp)->cols; } } @@ -536,13 +536,13 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin freeSSmlKv(&kv); return TSDB_CODE_SUCCESS; } - if (cnt >= taosArrayGetSize(info->masColKVs)) { + if (cnt >= taosArrayGetSize(info->maxColKVs)) { info->dataFormat = false; info->reRun = true; freeSSmlKv(&kv); return TSDB_CODE_SUCCESS; } - SSmlKv *maxKV = (SSmlKv *)taosArrayGet(info->masColKVs, cnt); + SSmlKv *maxKV = (SSmlKv 
*)taosArrayGet(info->maxColKVs, cnt); if (kv.type != maxKV->type) { info->dataFormat = false; info->reRun = true; diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py index b3aeb72194..cae012ece1 100644 --- a/tests/system-test/2-query/sml.py +++ b/tests/system-test/2-query/sml.py @@ -110,6 +110,11 @@ class TDTestCase: tdSql.query(f"select * from ts3724.`stb2.`") tdSql.checkRows(1) + + # tdSql.query(f"select * from td24559.stb order by _ts") + # tdSql.checkRows(4) + # tdSql.checkData(0, 2, "POINT (4.343000 89.342000)") + # tdSql.checkData(3, 2, "GEOMETRYCOLLECTION (MULTIPOINT ((0.000000 0.000000), (1.000000 1.000000)), POINT (3.000000 4.000000), LINESTRING (2.000000 3.000000, 3.000000 4.000000))") return def run(self): From d322cfce50b4be2c76219aa1e5d6c807b1694312 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 8 Aug 2023 16:28:51 +0800 Subject: [PATCH 015/147] fix:add hashIteratorCancel for hash iterator --- source/client/src/clientSml.c | 5 +++++ source/dnode/vnode/src/sma/smaRollup.c | 1 + source/dnode/vnode/src/tq/tqRead.c | 1 + 3 files changed, 7 insertions(+) diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index 65175110cf..cad32842a0 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -1073,6 +1073,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) { return 0; end: + taosHashCancelIterate(info->superTables, tmp); taosHashCleanup(hashTmp); taosMemoryFreeClear(pTableMeta); catalogRefreshTableMeta(info->pCatalog, &conn, &pName, 1); @@ -1434,6 +1435,7 @@ static int32_t smlInsertData(SSmlHandle *info) { code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE); if(code != TSDB_CODE_SUCCESS){ taosMemoryFree(measure); + taosHashCancelIterate(info->childTables, oneTable); return code; } @@ -1442,6 +1444,7 @@ static int32_t smlInsertData(SSmlHandle *info) { if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " catalogGetTableHashVgroup failed. 
table name: %s", info->id, tableData->childTableName); taosMemoryFree(measure); + taosHashCancelIterate(info->childTables, oneTable); return code; } taosHashPut(info->pVgHash, (const char *)&vg.vgId, sizeof(vg.vgId), (char *)&vg, sizeof(vg)); @@ -1451,6 +1454,7 @@ static int32_t smlInsertData(SSmlHandle *info) { if (unlikely(NULL == pMeta || NULL == (*pMeta)->tableMeta)) { uError("SML:0x%" PRIx64 " NULL == pMeta. table name: %s", info->id, tableData->childTableName); taosMemoryFree(measure); + taosHashCancelIterate(info->childTables, oneTable); return TSDB_CODE_SML_INTERNAL_ERROR; } @@ -1466,6 +1470,7 @@ static int32_t smlInsertData(SSmlHandle *info) { taosMemoryFree(measure); if (code != TSDB_CODE_SUCCESS) { uError("SML:0x%" PRIx64 " smlBindData failed", info->id); + taosHashCancelIterate(info->childTables, oneTable); return code; } oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, oneTable); diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 9fd4938448..2813f9059c 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -905,6 +905,7 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, int64_t version, void *pReq, void *pMsg, tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); if (tdExecuteRSmaAsync(pSma, version, pMsg, len, inputType, *pTbSuid) < 0) { smaError("vgId:%d, failed to process rsma submit exec 2 since: %s", SMA_VID(pSma), terrstr()); + taosHashCancelIterate(uidStore.uidHash, pIter); goto _err; } } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 9b8f1781cb..046502b49f 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -1095,6 +1095,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { if(ret != TDB_CODE_SUCCESS) { tqError("qGetTableList in tqUpdateTbUidList error:%d handle %s consumer:0x%" PRIx64, ret, pTqHandle->subKey, 
pTqHandle->consumerId); taosArrayDestroy(list); + taosHashCancelIterate(pTq->pHandle, pIter); return ret; } tqReaderSetTbUidList(pTqHandle->execHandle.pTqReader, list, NULL); From 16e015253b2c3f01981914f34743794e1d203e94 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 10:10:25 +0800 Subject: [PATCH 016/147] s3/mxml: remove os external dependency --- contrib/CMakeLists.txt | 31 ++++++++++++++++++++++++++++--- contrib/test/cos/CMakeLists.txt | 4 ++-- source/dnode/vnode/CMakeLists.txt | 2 +- 3 files changed, 31 insertions(+), 6 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index df9519d00f..db4d359938 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -124,6 +124,9 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) + file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) + cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif(${BUILD_WITH_COS}) @@ -157,6 +160,21 @@ if(${BUILD_GEOS}) cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif() +# SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=unused-function") +# include(ExternalProject) +# ExternalProject_Add(mxml +# GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git +# GIT_TAG release-2.10 +# SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" +# #BINARY_DIR "" +# BUILD_IN_SOURCE TRUE +# CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build +# BUILD_COMMAND make +# INSTALL_COMMAND make install +# TEST_COMMAND "" +# ) + + # download dependencies configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . 
@@ -355,7 +373,10 @@ endif() # cos if(${BUILD_WITH_COS}) + if(NOT ${TD_WINDOWS}) + #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) + INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) set(CMAKE_BUILD_TYPE debug) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) @@ -363,11 +384,15 @@ if(${BUILD_WITH_COS}) add_subdirectory(cos-c-sdk-v5 EXCLUDE_FROM_ALL) target_include_directories( - cos_c_sdk - PUBLIC $ - ) + cos_c_sdk + PUBLIC $ + ) set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME}) + + else() + + endif(NOT ${TD_WINDOWS}) endif(${BUILD_WITH_COS}) # lucene diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 77c57e5a65..3eb484c2c5 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -39,11 +39,11 @@ target_include_directories( find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) -find_library(MINIXML_LIBRARY mxml) +#find_library(MINIXML_LIBRARY mxml) find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) target_link_libraries(cosTest ${APR_UTIL_LIBRARY}) target_link_libraries(cosTest ${APR_LIBRARY}) -target_link_libraries(cosTest ${MINIXML_LIBRARY}) +target_link_libraries(cosTest mxml) target_link_libraries(cosTest ${CURL_LIBRARY}) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 0612f924f5..eea81ea3d2 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -137,7 +137,7 @@ endif() find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) -find_library(MINIXML_LIBRARY mxml) +#find_library(MINIXML_LIBRARY mxml) find_library(CURL_LIBRARY curl) target_link_libraries( From c4f7b5d530a58026b1b1b7b64b535e1e12887ecc Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 10:23:00 +0800 Subject: [PATCH 
017/147] mxml: makefile for mxml --- cmake/mxml_CMakeLists.txt.in | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 cmake/mxml_CMakeLists.txt.in diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in new file mode 100644 index 0000000000..994aa6e2cb --- /dev/null +++ b/cmake/mxml_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# cos +ExternalProject_Add(mxml + GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git + GIT_TAG release-2.10 + SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" + #BINARY_DIR "" + BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + BUILD_COMMAND make + INSTALL_COMMAND make install + TEST_COMMAND "" +) From 4d1155a5cfe6a329a4bb728e1c4aa3c3e3600a77 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 10:43:27 +0800 Subject: [PATCH 018/147] curl: makefile for curl --- cmake/curl_CMakeLists.txt.in | 12 ++++++++++++ contrib/test/cos/CMakeLists.txt | 4 ++-- source/dnode/vnode/CMakeLists.txt | 6 +++--- 3 files changed, 17 insertions(+), 5 deletions(-) create mode 100644 cmake/curl_CMakeLists.txt.in diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in new file mode 100644 index 0000000000..a23c5e7bab --- /dev/null +++ b/cmake/curl_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# curl +ExternalProject_Add(curl + GIT_REPOSITORY https://github.com/curl/curl.git + GIT_TAG curl-7_88_1 + SOURCE_DIR "${TD_CONTRIB_DIR}/curl" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 3eb484c2c5..38de8a25e8 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -40,10 +40,10 @@ target_include_directories( find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) -find_library(CURL_LIBRARY curl) 
+#find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) target_link_libraries(cosTest ${APR_UTIL_LIBRARY}) target_link_libraries(cosTest ${APR_LIBRARY}) target_link_libraries(cosTest mxml) -target_link_libraries(cosTest ${CURL_LIBRARY}) +target_link_libraries(cosTest curl) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index eea81ea3d2..562207268c 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -138,7 +138,7 @@ endif() find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) -find_library(CURL_LIBRARY curl) +#find_library(CURL_LIBRARY curl) target_link_libraries( vnode @@ -164,8 +164,8 @@ target_link_libraries( cos_c_sdk ${APR_UTIL_LIBRARY} ${APR_LIBRARY} - ${MINIXML_LIBRARY} - ${CURL_LIBRARY} + mxml + curl ) IF (TD_GRANT) From 7114ad63e5ac8846a11e221079031ee22bccc4f1 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 10:54:17 +0800 Subject: [PATCH 019/147] =?UTF-8?q?apr=EF=BC=9Amakefile=20for=20apr=20&=20?= =?UTF-8?q?apr-util?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- cmake/apr-util_CMakeLists.txt.in | 12 ++++++++++++ cmake/apr_CMakeLists.txt.in | 12 ++++++++++++ contrib/test/cos/CMakeLists.txt | 8 ++++---- source/dnode/vnode/CMakeLists.txt | 8 ++++---- 4 files changed, 32 insertions(+), 8 deletions(-) create mode 100644 cmake/apr-util_CMakeLists.txt.in create mode 100644 cmake/apr_CMakeLists.txt.in diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in new file mode 100644 index 0000000000..8471a05db6 --- /dev/null +++ b/cmake/apr-util_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# apr-util +ExternalProject_Add(apr + GIT_REPOSITORY https://github.com/apache/apr-util.git + GIT_TAG 1.5.4 + SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + 
CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in new file mode 100644 index 0000000000..68b6f39c89 --- /dev/null +++ b/cmake/apr_CMakeLists.txt.in @@ -0,0 +1,12 @@ +# apr +ExternalProject_Add(apr + GIT_REPOSITORY https://github.com/apache/apr.git + GIT_TAG 1.5.2 + SOURCE_DIR "${TD_CONTRIB_DIR}/apr" + BINARY_DIR "" + #BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 38de8a25e8..2d2e101877 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -37,13 +37,13 @@ target_include_directories( PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" ) -find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) #find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) -target_link_libraries(cosTest ${APR_UTIL_LIBRARY}) -target_link_libraries(cosTest ${APR_LIBRARY}) +target_link_libraries(cosTest apr}) +target_link_libraries(cosTest apr-util}) target_link_libraries(cosTest mxml) target_link_libraries(cosTest curl) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 562207268c..cf7d205c00 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,8 +135,8 @@ else() endif() endif() -find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) #find_library(MINIXML_LIBRARY mxml) 
#find_library(CURL_LIBRARY curl) @@ -162,8 +162,8 @@ target_link_libraries( # s3 cos_c_sdk - ${APR_UTIL_LIBRARY} - ${APR_LIBRARY} + apr + apr-util mxml curl ) IF (TD_GRANT) From 2e0519b9609d98a119b8908199c073dd8cb07d9f Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 11:01:42 +0800 Subject: [PATCH 020/147] apr: fix apr & apr-util project names --- cmake/apr-util_CMakeLists.txt.in | 2 +- cmake/apr_CMakeLists.txt.in | 2 +- contrib/test/cos/CMakeLists.txt | 4 ++-- source/dnode/vnode/CMakeLists.txt | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 8471a05db6..c4dd943243 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -1,5 +1,5 @@ # apr-util -ExternalProject_Add(apr +ExternalProject_Add(aprutil-1 GIT_REPOSITORY https://github.com/apache/apr-util.git GIT_TAG 1.5.4 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 68b6f39c89..bfbe8196d3 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -1,5 +1,5 @@ # apr -ExternalProject_Add(apr +ExternalProject_Add(apr-1 GIT_REPOSITORY https://github.com/apache/apr.git GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" diff --git a/contrib/test/cos/CMakeLists.txt b/contrib/test/cos/CMakeLists.txt index 2d2e101877..f8804033de 100644 --- a/contrib/test/cos/CMakeLists.txt +++ b/contrib/test/cos/CMakeLists.txt @@ -43,7 +43,7 @@ target_include_directories( #find_library(CURL_LIBRARY curl) target_link_libraries(cosTest cos_c_sdk) -target_link_libraries(cosTest apr}) -target_link_libraries(cosTest apr-util}) +target_link_libraries(cosTest apr-1) +target_link_libraries(cosTest aprutil-1) target_link_libraries(cosTest mxml) target_link_libraries(cosTest curl) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index cf7d205c00..a219990690 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ 
b/source/dnode/vnode/CMakeLists.txt @@ -162,8 +162,8 @@ target_link_libraries( # s3 cos_c_sdk - apr - apr-util + apr-1 + aprutil-1 mxml curl ) From 57ba106371bbbcbf83b14ce8668b798aae861bbe Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 11:04:53 +0800 Subject: [PATCH 021/147] cos: move cmake prefix path after external building --- contrib/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index db4d359938..0ef799e9da 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -125,7 +125,6 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) - set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) @@ -374,6 +373,7 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) + set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) From 398567ef4ca524cd74ee3f203ba6512cde7f523d Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 11:14:00 +0800 Subject: [PATCH 022/147] contrib/cmake: add apr apr-util, and curl into makefile --- contrib/CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 0ef799e9da..053266c533 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -125,6 +125,9 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) 
cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) From 7e2859ed43e8ebf375a5808b0d35c3c191a725b6 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 13:03:37 +0800 Subject: [PATCH 023/147] apr: use tarball to avoid ./buildconf --- cmake/apr-util_CMakeLists.txt.in | 18 +++++++++++------- cmake/apr_CMakeLists.txt.in | 21 ++++++++++++++------- cmake/curl_CMakeLists.txt.in | 9 ++++----- contrib/CMakeLists.txt | 2 +- source/dnode/vnode/CMakeLists.txt | 16 ++++++++-------- 5 files changed, 38 insertions(+), 28 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index c4dd943243..b81745aeef 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -1,12 +1,16 @@ # apr-util ExternalProject_Add(aprutil-1 - GIT_REPOSITORY https://github.com/apache/apr-util.git - GIT_TAG 1.5.4 + URL https://dlcdn.apache.org//apr/apr-util-1.6.3.tar.gz + URL_HASH SHA256=2b74d8932703826862ca305b094eef2983c27b39d5c9414442e9976a9acf1983 + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/apache/apr-util.git + #GIT_TAG 1.5.4 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" - BINARY_DIR "" - #BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" + #BINARY_DIR "" + BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build + BUILD_COMMAND make + INSTALL_COMMAND make install TEST_COMMAND "" ) diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index bfbe8196d3..037c2ee6cc 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -1,12 +1,19 @@ # apr ExternalProject_Add(apr-1 - GIT_REPOSITORY https://github.com/apache/apr.git - GIT_TAG 1.5.2 + URL https://dlcdn.apache.org//apr/apr-1.7.4.tar.gz + URL_HASH 
SHA256=a4137dd82a185076fa50ba54232d920a17c6469c30b0876569e1c2a05ff311d9 + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/apache/apr.git + #GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" - BINARY_DIR "" - #BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" + #BINARY_DIR "${CMAKE_BINARY_DIR}/build" + BUILD_IN_SOURCE TRUE + #CONFIGURE_COMMAND "" + #BUILD_COMMAND "" + #INSTALL_COMMAND "" + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + BUILD_COMMAND make + INSTALL_COMMAND make install TEST_COMMAND "" ) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index a23c5e7bab..cec4dda004 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -3,10 +3,9 @@ ExternalProject_Add(curl GIT_REPOSITORY https://github.com/curl/curl.git GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" - BINARY_DIR "" - #BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND "" - BUILD_COMMAND "" - INSTALL_COMMAND "" + BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + BUILD_COMMAND make + INSTALL_COMMAND make install TEST_COMMAND "" ) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 053266c533..058b2cf042 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -125,10 +125,10 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif(${BUILD_WITH_COS}) diff --git 
a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index a219990690..0612f924f5 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,10 +135,10 @@ else() endif() endif() -#find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -#find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) -#find_library(MINIXML_LIBRARY mxml) -#find_library(CURL_LIBRARY curl) +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) target_link_libraries( vnode @@ -162,10 +162,10 @@ target_link_libraries( # s3 cos_c_sdk - apr-1 - aprutil-1 - mxml - curl + ${APR_UTIL_LIBRARY} + ${APR_LIBRARY} + ${MINIXML_LIBRARY} + ${CURL_LIBRARY} ) IF (TD_GRANT) From 56b348abf2e821fd58834566ff878ffc988bbf34 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 13:15:25 +0800 Subject: [PATCH 024/147] curl: use tarball --- cmake/curl_CMakeLists.txt.in | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index cec4dda004..cbfe939219 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -1,7 +1,10 @@ # curl ExternalProject_Add(curl - GIT_REPOSITORY https://github.com/curl/curl.git - GIT_TAG curl-7_88_1 + URL https://curl.se/download/curl-8.2.1.tar.gz + DOWNLOAD_NO_PROGRESS 1 + DOWNLOAD_DIR "${TD_CONTRIB_DIR}/deps-download" + #GIT_REPOSITORY https://github.com/curl/curl.git + #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ From 376a2c2520dc3c5887bbb10f17a143ba9aeec407 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 13:31:18 +0800 Subject: [PATCH 025/147] curl: with openssl building --- cmake/curl_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index cbfe939219..e411cd893c 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -7,7 +7,7 @@ ExternalProject_Add(curl #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-openssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" From b08d5b4d42de9ae814c695478d7eee9c28616573 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 14:02:10 +0800 Subject: [PATCH 026/147] mxml: add include dir to vnode --- source/dnode/vnode/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 0612f924f5..6c107e0a22 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -195,6 +195,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" + PUBLIC "${CMAKE_BINARY_DIR}/build/include" ) if(${BUILD_TEST}) From 30cbbc425fdf2a17574c77bea6846ca15af6ba75 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 14:31:17 +0800 Subject: [PATCH 027/147] cos: new update command to build every cmake --- cmake/apr-util_CMakeLists.txt.in | 1 + cmake/apr_CMakeLists.txt.in | 5 +---- cmake/curl_CMakeLists.txt.in | 3 ++- cmake/mxml_CMakeLists.txt.in | 1 + 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index b81745aeef..ee30787cb6 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -9,6 +9,7 @@ ExternalProject_Add(aprutil-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" #BINARY_DIR "" BUILD_IN_SOURCE TRUE + UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ 
--with-apr=${CMAKE_BINARY_DIR}/build BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 037c2ee6cc..fa124de62c 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -7,11 +7,8 @@ ExternalProject_Add(apr-1 #GIT_REPOSITORY https://github.com/apache/apr.git #GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" - #BINARY_DIR "${CMAKE_BINARY_DIR}/build" BUILD_IN_SOURCE TRUE - #CONFIGURE_COMMAND "" - #BUILD_COMMAND "" - #INSTALL_COMMAND "" + UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index e411cd893c..47c4fd72a1 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -7,7 +7,8 @@ ExternalProject_Add(curl #GIT_TAG curl-7_88_1 SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-openssl + UPDATE_COMMAND "" + CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 994aa6e2cb..12c9ea7d89 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -5,6 +5,7 @@ ExternalProject_Add(mxml SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" #BINARY_DIR "" BUILD_IN_SOURCE TRUE + UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ BUILD_COMMAND make INSTALL_COMMAND make install From 5d0edcd17b16bc557087a7f148d9f6b2d69d39d4 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 15:41:23 +0800 Subject: [PATCH 028/147] cos: use /usr/local as prefix instead of debug/build --- cmake/apr-util_CMakeLists.txt.in | 3 ++- cmake/apr_CMakeLists.txt.in | 3 ++- cmake/curl_CMakeLists.txt.in | 3 ++- cmake/mxml_CMakeLists.txt.in | 3 ++- 
contrib/CMakeLists.txt | 4 ++-- 5 files changed, 10 insertions(+), 6 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index ee30787cb6..fc4f92858c 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -10,7 +10,8 @@ ExternalProject_Add(aprutil-1 #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build + CONFIGURE_COMMAND ./configure --with-apr=/usr/local/ BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index fa124de62c..57e2014c31 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -9,7 +9,8 @@ ExternalProject_Add(apr-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 47c4fd72a1..fcd16a0518 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -8,7 +8,8 @@ ExternalProject_Add(curl SOURCE_DIR "${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl + CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 12c9ea7d89..cdd3e5b301 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,8 @@ 
ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 058b2cf042..507928cbe9 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -124,7 +124,7 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) - file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + #file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -376,7 +376,7 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) + #set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) From 93ce558abf20e429980a6901344098bf16a0494c Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 15:49:34 +0800 Subject: [PATCH 029/147] apu: fix with-apr config --- cmake/apr-util_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index fc4f92858c..96a8b3ef75 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build - CONFIGURE_COMMAND ./configure --with-apr=/usr/local/ + CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make 
INSTALL_COMMAND make install TEST_COMMAND "" From bc64d5f769f6d5fdb7baf7b43e81ef2708fa04b8 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 16:53:19 +0800 Subject: [PATCH 030/147] cos: use ~/local as prefix for building --- cmake/apr-util_CMakeLists.txt.in | 4 ++-- cmake/apr_CMakeLists.txt.in | 4 ++-- cmake/curl_CMakeLists.txt.in | 4 ++-- cmake/mxml_CMakeLists.txt.in | 4 ++-- contrib/CMakeLists.txt | 4 ++-- tests/parallel_test/container_build.sh | 1 + 6 files changed, 11 insertions(+), 10 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 96a8b3ef75..1ae52c69af 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -10,8 +10,8 @@ ExternalProject_Add(aprutil-1 #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --with-apr=${CMAKE_BINARY_DIR}/build - CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ --with-apr=$ENV{HOME}/local + #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 57e2014c31..1df68919ae 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -9,8 +9,8 @@ ExternalProject_Add(apr-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ - CONFIGURE_COMMAND ./configure + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ + #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index fcd16a0518..b09e85b9b2 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -8,8 +8,8 @@ ExternalProject_Add(curl SOURCE_DIR "${TD_CONTRIB_DIR}/curl" 
BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ --without-ssl - CONFIGURE_COMMAND ./configure --without-ssl + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local --without-ssl + #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index cdd3e5b301..33dc48ab4e 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,8 +6,8 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE UPDATE_COMMAND "" - #CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ - CONFIGURE_COMMAND ./configure + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local + #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install TEST_COMMAND "" diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 507928cbe9..cc93226d68 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -124,7 +124,7 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) - #file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/build/) + file(MAKE_DIRECTORY $ENV{HOME}/local/) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -376,7 +376,7 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - #set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR}/build) + set(CMAKE_PREFIX_PATH $ENV{HOME}/local) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 5ae061072a..699a4667dd 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -88,6 +88,7 @@ docker 
run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ + -v /root/local:/root/local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From 1ce3ef7fd52c3b86c2a2a68481cb10fa9c4b2602 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 17:02:16 +0800 Subject: [PATCH 031/147] cos: fix local/include directory --- contrib/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index cc93226d68..9feefa6947 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -379,7 +379,7 @@ if(${BUILD_WITH_COS}) set(CMAKE_PREFIX_PATH $ENV{HOME}/local) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) - INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) + INCLUDE_DIRECTORIES($ENV{HOME}/local/include) set(CMAKE_BUILD_TYPE debug) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) From 4911b6c8558beb4948fef3dbd1d6c46dfe630f81 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 17:07:55 +0800 Subject: [PATCH 032/147] container_build: use local as install dir --- tests/parallel_test/container_build.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 699a4667dd..8de8f377fd 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -60,6 +60,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ + -v /root/local:/root/local \ -v 
${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From a5ccc3e8aa39f91a85931129dffc6aeea8e34767 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 17:53:25 +0800 Subject: [PATCH 033/147] apr: make apr, apu, curl build always --- cmake/apr-util_CMakeLists.txt.in | 3 ++- cmake/apr_CMakeLists.txt.in | 4 +++- cmake/curl_CMakeLists.txt.in | 3 ++- cmake/mxml_CMakeLists.txt.in | 2 +- contrib/CMakeLists.txt | 15 --------------- 5 files changed, 8 insertions(+), 19 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 1ae52c69af..c64e4ffcdb 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -9,7 +9,8 @@ ExternalProject_Add(aprutil-1 SOURCE_DIR "${TD_CONTRIB_DIR}/apr-util" #BINARY_DIR "" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ --with-apr=$ENV{HOME}/local #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 1df68919ae..bae8cfe0a6 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -8,7 +8,9 @@ ExternalProject_Add(apr-1 #GIT_TAG 1.5.2 SOURCE_DIR "${TD_CONTRIB_DIR}/apr" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + UPDATE_DISCONNECTED TRUE + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ #CONFIGURE_COMMAND ./configure BUILD_COMMAND make diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index b09e85b9b2..27457ffdbc 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -7,7 +7,8 @@ ExternalProject_Add(curl #GIT_TAG curl-7_88_1 SOURCE_DIR 
"${TD_CONTRIB_DIR}/curl" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + BUILD_ALWAYS 1 + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local --without-ssl #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 33dc48ab4e..f9b7e8e642 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -5,7 +5,7 @@ ExternalProject_Add(mxml SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" #BINARY_DIR "" BUILD_IN_SOURCE TRUE - UPDATE_COMMAND "" + #UPDATE_COMMAND "" CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local #CONFIGURE_COMMAND ./configure BUILD_COMMAND make diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 9feefa6947..3fb7b93abe 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -162,21 +162,6 @@ if(${BUILD_GEOS}) cat("${TD_SUPPORT_DIR}/geos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif() -# SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=unused-function") -# include(ExternalProject) -# ExternalProject_Add(mxml -# GIT_REPOSITORY https://github.com/michaelrsweet/mxml.git -# GIT_TAG release-2.10 -# SOURCE_DIR "${TD_CONTRIB_DIR}/mxml" -# #BINARY_DIR "" -# BUILD_IN_SOURCE TRUE -# CONFIGURE_COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build -# BUILD_COMMAND make -# INSTALL_COMMAND make install -# TEST_COMMAND "" -# ) - - # download dependencies configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . 
From a853d9d40cd9c937417d90692ad62c58e020e486 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 18:21:55 +0800 Subject: [PATCH 034/147] mxml: use ~/local/include --- source/dnode/vnode/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 6c107e0a22..e6af282d10 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -195,7 +195,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" - PUBLIC "${CMAKE_BINARY_DIR}/build/include" + PUBLIC "$ENV{HOME}/local/include" ) if(${BUILD_TEST}) From 873262ec784e8fd2f318276dddcb347fe82f2ffc Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Wed, 9 Aug 2023 20:45:05 +0800 Subject: [PATCH 035/147] fix:add test cases to cases.task --- tests/parallel_test/cases.task | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index fb67ee51cd..6b9a577fd5 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -126,6 +126,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_taosx.py +,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_offset.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/raw_block_interface_test.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqSubscribeStb-r3.py -N 5 From b61216d1a85598e9e4c15352799a181e72bfd87b Mon Sep 17 00:00:00 2001 From: danielclow <106956386+danielclow@users.noreply.github.com> Date: Wed, 9 Aug 2023 23:28:54 +0800 Subject: [PATCH 036/147] docs: update 06-stream.md to correct sample sql statement the existing sql 
statement failed due to invalid syntax on "select _wstart as start". copying the statement from the zh document instead --- docs/en/07-develop/06-stream.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/07-develop/06-stream.md b/docs/en/07-develop/06-stream.md index 125173e60b..59a6b815cf 100644 --- a/docs/en/07-develop/06-stream.md +++ b/docs/en/07-develop/06-stream.md @@ -52,7 +52,7 @@ CREATE TABLE d1004 USING meters TAGS ("California.LosAngeles", 3); ### Create a Stream ```sql -create stream current_stream into current_stream_output_stb as select _wstart as start, _wend as end, max(current) as max_current from meters where voltage <= 220 interval (5s); +create stream current_stream trigger at_once into current_stream_output_stb as select _wstart as wstart, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s); ``` ### Write Data From ca5571d0d6fb05b622e93d90bc3d5f30077ad1ec Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 9 Aug 2023 18:48:59 +0800 Subject: [PATCH 037/147] config: fix default configs --- source/common/src/tglobal.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index fbc98715f0..91ab9f62d5 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -266,6 +266,9 @@ int32_t taosSetTfsCfg(SConfig *pCfg); int32_t taosSetS3Cfg(SConfig *pCfg) { tstrncpy(tsS3AccessKey, cfgGetItem(pCfg, "s3Accesskey")->str, TSDB_FQDN_LEN); + if (tsS3AccessKey[0] == '<') { + return 0; + } char *colon = strchr(tsS3AccessKey, ':'); if (!colon) { uError("invalid access key:%s", tsS3AccessKey); From 700d9a6d68888626f234373972a864cebbb66999 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 10 Aug 2023 13:02:34 +0800 Subject: [PATCH 038/147] fix: sma option for show create table --- source/libs/command/src/command.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/command/src/command.c 
b/source/libs/command/src/command.c index 8ddf730d5a..921ec41021 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -624,7 +624,7 @@ void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STableCfg* } } - if (nSma < pCfg->numOfColumns) { + if (nSma < pCfg->numOfColumns && nSma > 0) { bool smaOn = false; *len += sprintf(buf + VARSTR_HEADER_SIZE + *len, " SMA("); for (int32_t i = 0; i < pCfg->numOfColumns; ++i) { From e2481599b0f71667baea3bcbade89568e56ccbd7 Mon Sep 17 00:00:00 2001 From: liuyao <54liuyao@163.com> Date: Thu, 10 Aug 2023 13:58:46 +0800 Subject: [PATCH 039/147] fix mem leak --- source/libs/executor/src/timewindowoperator.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index d2e385200d..4f793d7064 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1520,6 +1520,10 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) { colDataDestroy(&pInfo->twAggSup.timeWindowData); pInfo->groupResInfo.pRows = taosArrayDestroy(pInfo->groupResInfo.pRows); cleanupExprSupp(&pInfo->scalarSupp); + tSimpleHashCleanup(pInfo->pUpdatedMap); + pInfo->pUpdatedMap = NULL; + pInfo->pUpdated = taosArrayDestroy(pInfo->pUpdated); + taosMemoryFreeClear(param); } From cd63e814500cdd9138e674831e0bbef664fc2b75 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:18:12 +0800 Subject: [PATCH 040/147] cos: separate building phase for apr & apr-util --- contrib/CMakeLists.txt | 42 +++++++++++++++++++++++++------ source/dnode/vnode/CMakeLists.txt | 2 +- 2 files changed, 36 insertions(+), 8 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 3fb7b93abe..e8f6c98efe 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -6,6 +6,35 @@ function(cat IN_FILE OUT_FILE) file(APPEND ${OUT_FILE} "${CONTENTS}") endfunction(cat 
IN_FILE OUT_FILE) +set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") +configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + +if(${BUILD_WITH_COS}) + file(MAKE_DIRECTORY $ENV{HOME}/.cos-local/) + cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) + cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) +endif(${BUILD_WITH_COS}) + +configure_file(${CONTRIB_TMP_FILE3} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") +execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") +execute_process(COMMAND "${CMAKE_COMMAND}" --build . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") + +set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2") +configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) + +if(${BUILD_WITH_COS}) + cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) +endif(${BUILD_WITH_COS}) + +configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") +execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . + WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") +execute_process(COMMAND "${CMAKE_COMMAND}" --build . 
+ WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") + set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -124,11 +153,10 @@ endif(${BUILD_WITH_SQLITE}) # cos if(${BUILD_WITH_COS}) - file(MAKE_DIRECTORY $ENV{HOME}/local/) - cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) - cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + #cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) cat("${TD_SUPPORT_DIR}/cos_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) add_definitions(-DUSE_COS) endif(${BUILD_WITH_COS}) @@ -361,10 +389,10 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - set(CMAKE_PREFIX_PATH $ENV{HOME}/local) + set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) - INCLUDE_DIRECTORIES($ENV{HOME}/local/include) + INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local/include) set(CMAKE_BUILD_TYPE debug) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index e6af282d10..3cfcc9b716 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -195,7 +195,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" - PUBLIC "$ENV{HOME}/local/include" + PUBLIC "$ENV{HOME}/.cos-local/include" ) if(${BUILD_TEST}) From bb0b80e42df5c0c3b6af41ccdbd4767ccf3684de Mon Sep 17 00:00:00 
2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:24:51 +0800 Subject: [PATCH 041/147] cmake: use .cos-local as prebuilt directory --- cmake/apr-util_CMakeLists.txt.in | 2 +- cmake/apr_CMakeLists.txt.in | 2 +- cmake/curl_CMakeLists.txt.in | 2 +- cmake/mxml_CMakeLists.txt.in | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index c64e4ffcdb..6172be380e 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ --with-apr=$ENV{HOME}/local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ --with-apr=$ENV{HOME}/.cos-local #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index bae8cfe0a6..538b45a7f9 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(apr-1 UPDATE_DISCONNECTED TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local/ + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 27457ffdbc..1d9d028848 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local --without-ssl + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --without-ssl #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 
f9b7e8e642..87b126d8d3 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,7 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install From f3b56a0687e2b4f0127e3eff22787783f4a59154 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:28:23 +0800 Subject: [PATCH 042/147] apu: fix apr-util building --- contrib/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index e8f6c98efe..d20b205e69 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -26,7 +26,7 @@ set(CONTRIB_TMP_FILE2 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in2") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) if(${BUILD_WITH_COS}) - cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) + cat("${TD_SUPPORT_DIR}/apr-util_CMakeLists.txt.in" ${CONTRIB_TMP_FILE2}) endif(${BUILD_WITH_COS}) configure_file(${CONTRIB_TMP_FILE2} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt") From b739a422e33da2c44842b9afb4674cd1ded0589d Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 14:31:11 +0800 Subject: [PATCH 043/147] container-build: use .cos-local for prebuilt building --- tests/parallel_test/container_build.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 8de8f377fd..62254984a9 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -60,7 +60,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/local:/root/local \ + -v 
/root/.cos-local:/root/.cos-local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ @@ -89,7 +89,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/local:/root/local \ + -v /root/.cos-local:/root/.cos-local \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From 8890fa578185d7605c9348847f6b34b6c7190472 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Aug 2023 15:13:49 +0800 Subject: [PATCH 044/147] fix(stream): execute the stream task directly, instead of executing it in a asynchronized way. 
--- source/libs/stream/src/streamExec.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index c7da80fdaf..b479931cd2 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -592,14 +592,21 @@ int32_t streamTryExec(SStreamTask* pTask) { if (pTask->status.transferState) { code = streamTransferStateToStreamTask(pTask); if (code != TSDB_CODE_SUCCESS) { + atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); return code; } - streamSchedExec(pTask); - } else { - atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); - qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus), - pTask->status.schedStatus); + + // the schedStatus == TASK_SCHED_STATUS__ACTIVE, streamSchedExec cannot be executed, so execute once again by + // call this function (streamExecForAll) directly. 
+ code = streamExecForAll(pTask); + if (code < 0) { + // do nothing + } } + + atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); + qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, + streamGetTaskStatusStr(pTask->status.taskStatus), pTask->status.schedStatus); } else { atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus), From c04ada3573e86ddf47a5e5fc67f79557c56b5c66 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 15:28:22 +0800 Subject: [PATCH 045/147] cos: link with static libs --- source/dnode/vnode/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 3cfcc9b716..c036fbc54a 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -135,6 +135,7 @@ else() endif() endif() +set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) find_library(MINIXML_LIBRARY mxml) From 78fd70202c98f7133c8901e75260f0d4dd030a6f Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 10 Aug 2023 16:18:53 +0800 Subject: [PATCH 046/147] fix:offset error in tmq & add test cases --- source/client/src/clientTmq.c | 10 ++--- source/dnode/vnode/src/inc/tq.h | 2 +- source/dnode/vnode/src/tq/tq.c | 5 +-- source/dnode/vnode/src/tq/tqUtil.c | 62 +++++++++++++-------------- source/libs/wal/src/walRead.c | 6 ++- tests/system-test/7-tmq/tmq_offset.py | 6 +-- utils/test/c/tmq_offset_test.c | 10 +++++ 7 files changed, 54 insertions(+), 47 deletions(-) diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index ae82be2470..b4168046f4 100644 --- a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -1863,10 +1863,10 @@ static int32_t 
tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* p return 0; } -static void updateVgInfo(SMqClientVg* pVg, STqOffsetVal* reqOffset, STqOffsetVal* rspOffset, int64_t sver, int64_t ever, int64_t consumerId){ +static void updateVgInfo(SMqClientVg* pVg, STqOffsetVal* reqOffset, STqOffsetVal* rspOffset, int64_t sver, int64_t ever, int64_t consumerId, bool hasData){ if (!pVg->seekUpdated) { tscDebug("consumer:0x%" PRIx64" local offset is update, since seekupdate not set", consumerId); - pVg->offsetInfo.beginOffset = *reqOffset; + if(hasData) pVg->offsetInfo.beginOffset = *reqOffset; pVg->offsetInfo.endOffset = *rspOffset; } else { tscDebug("consumer:0x%" PRIx64" local offset is NOT update, since seekupdate is set", consumerId); @@ -1929,7 +1929,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { pVg->epSet = *pollRspWrapper->pEpset; } - updateVgInfo(pVg, &pDataRsp->reqOffset, &pDataRsp->rspOffset, pDataRsp->head.walsver, pDataRsp->head.walever, tmq->consumerId); + updateVgInfo(pVg, &pDataRsp->reqOffset, &pDataRsp->rspOffset, pDataRsp->head.walsver, pDataRsp->head.walever, tmq->consumerId, pDataRsp->blockNum != 0); char buf[TSDB_OFFSET_LEN] = {0}; tFormatOffset(buf, TSDB_OFFSET_LEN, &pDataRsp->rspOffset); @@ -1979,7 +1979,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { return NULL; } - updateVgInfo(pVg, &pollRspWrapper->metaRsp.rspOffset, &pollRspWrapper->metaRsp.rspOffset, pollRspWrapper->metaRsp.head.walsver, pollRspWrapper->metaRsp.head.walever, tmq->consumerId); + updateVgInfo(pVg, &pollRspWrapper->metaRsp.rspOffset, &pollRspWrapper->metaRsp.rspOffset, pollRspWrapper->metaRsp.head.walsver, pollRspWrapper->metaRsp.head.walever, tmq->consumerId, true); // build rsp SMqMetaRspObj* pRsp = tmqBuildMetaRspFromWrapper(pollRspWrapper); taosFreeQitem(pollRspWrapper); @@ -2007,7 +2007,7 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) { return NULL; } - 
updateVgInfo(pVg, &pollRspWrapper->taosxRsp.reqOffset, &pollRspWrapper->taosxRsp.rspOffset, pollRspWrapper->taosxRsp.head.walsver, pollRspWrapper->taosxRsp.head.walever, tmq->consumerId); + updateVgInfo(pVg, &pollRspWrapper->taosxRsp.reqOffset, &pollRspWrapper->taosxRsp.rspOffset, pollRspWrapper->taosxRsp.head.walsver, pollRspWrapper->taosxRsp.head.walever, tmq->consumerId, pollRspWrapper->taosxRsp.blockNum != 0); if (pollRspWrapper->taosxRsp.blockNum == 0) { tscDebug("consumer:0x%" PRIx64 " taosx empty block received, vgId:%d, vg total:%" PRId64 ", reqId:0x%" PRIx64, diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 13b991e038..a6a84075b5 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -175,7 +175,7 @@ int32_t extractDelDataBlock(const void* pData, int32_t len, int64_t ver, SStream int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg); int32_t tqDoSendDataRsp(const SRpcHandleInfo* pRpcHandleInfo, const SMqDataRsp* pRsp, int32_t epoch, int64_t consumerId, int32_t type, int64_t sver, int64_t ever); -int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq); +int32_t tqInitDataRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset); #ifdef __cplusplus } #endif diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 98695a9e63..65ff1539aa 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -289,9 +289,8 @@ int32_t tqPushEmptyDataRsp(STqHandle* pHandle, int32_t vgId) { } SMqDataRsp dataRsp = {0}; - tqInitDataRsp(&dataRsp, &req); + tqInitDataRsp(&dataRsp, req.reqOffset); dataRsp.blockNum = 0; - dataRsp.rspOffset = dataRsp.reqOffset; char buf[TSDB_OFFSET_LEN] = {0}; tFormatOffset(buf, TSDB_OFFSET_LEN, &dataRsp.reqOffset); tqInfo("tqPushEmptyDataRsp to consumer:0x%"PRIx64 " vgId:%d, offset:%s, reqId:0x%" PRIx64, req.consumerId, vgId, buf, req.reqId); @@ -714,7 +713,7 @@ int32_t 
tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) { walReaderValidVersionRange(pHandle->execHandle.pTqReader->pWalReader, &sver, &ever); SMqDataRsp dataRsp = {0}; - tqInitDataRsp(&dataRsp, &req); + tqInitDataRsp(&dataRsp, req.reqOffset); if (req.useSnapshot == true) { tqError("consumer:0x%" PRIx64 " vgId:%d subkey:%s snapshot not support wal info", consumerId, vgId, req.subKey); diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index 1a0663665d..5cbca6e0f2 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -20,8 +20,9 @@ static int32_t tqSendMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp, int32_t vgId); -int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq) { - pRsp->reqOffset = pReq->reqOffset; +int32_t tqInitDataRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) { + pRsp->reqOffset = pOffset; + pRsp->rspOffset = pOffset; pRsp->blockData = taosArrayInit(0, sizeof(void*)); pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t)); @@ -35,8 +36,9 @@ int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq) { return 0; } -static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, const SMqPollReq* pReq) { - pRsp->reqOffset = pReq->reqOffset; +static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, STqOffsetVal pOffset) { + pRsp->reqOffset = pOffset; + pRsp->rspOffset = pOffset; pRsp->withTbName = 1; pRsp->withSchema = 1; @@ -69,7 +71,6 @@ static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, const SMqPollReq* pReq) { static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg, bool* pBlockReturned) { uint64_t consumerId = pRequest->consumerId; - STqOffsetVal reqOffset = pRequest->reqOffset; STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, pRequest->subKey); int32_t vgId = TD_VID(pTq->pVnode); @@ -86,7 +87,7 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, 
STqHand return 0; } else { // no poll occurs in this vnode for this topic, let's seek to the right offset value. - if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEST) { + if (pRequest->reqOffset.type == TMQ_OFFSET__RESET_EARLIEST) { if (pRequest->useSnapshot) { tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey:%s, vgId:%d, (earliest) set offset to be snapshot", consumerId, pHandle->subKey, vgId); @@ -100,12 +101,12 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand walRefFirstVer(pTq->pVnode->pWal, pHandle->pRef); tqOffsetResetToLog(pOffsetVal, pHandle->pRef->refVer); } - } else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) { + } else if (pRequest->reqOffset.type == TMQ_OFFSET__RESET_LATEST) { walRefLastVer(pTq->pVnode->pWal, pHandle->pRef); SMqDataRsp dataRsp = {0}; - tqInitDataRsp(&dataRsp, pRequest); + tqOffsetResetToLog(pOffsetVal, pHandle->pRef->refVer + 1); - tqOffsetResetToLog(&dataRsp.rspOffset, pHandle->pRef->refVer + 1); + tqInitDataRsp(&dataRsp, *pOffsetVal); tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, (latest) offset reset to %" PRId64, consumerId, pHandle->subKey, vgId, dataRsp.rspOffset.version); int32_t code = tqSendDataRsp(pHandle, pMsg, pRequest, &dataRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId); @@ -113,7 +114,7 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand *pBlockReturned = true; return code; - } else if (reqOffset.type == TMQ_OFFSET__RESET_NONE) { + } else if (pRequest->reqOffset.type == TMQ_OFFSET__RESET_NONE) { tqError("tmq poll: subkey:%s, no offset committed for consumer:0x%" PRIx64 " in vg %d, subkey %s, reset none failed", pHandle->subKey, consumerId, vgId, pRequest->subKey); @@ -125,11 +126,11 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand return 0; } -static void setRequestVersion(STqOffsetVal* offset, int64_t ver){ - if(offset->type == TMQ_OFFSET__LOG){ - offset->version = ver; - } -} +//static void 
setRequestVersion(STqOffsetVal* offset, int64_t ver){ +// if(offset->type == TMQ_OFFSET__LOG){ +// offset->version = ver; +// } +//} static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg, STqOffsetVal* pOffset) { @@ -138,8 +139,8 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle, terrno = 0; SMqDataRsp dataRsp = {0}; - tqInitDataRsp(&dataRsp, pRequest); - dataRsp.reqOffset.type = pOffset->type; // stroe origin type for getting offset in tmq_get_vgroup_offset + tqInitDataRsp(&dataRsp, *pOffset); +// dataRsp.reqOffset.type = pOffset->type; // store origin type for getting offset in tmq_get_vgroup_offset qSetTaskId(pHandle->execHandle.task, consumerId, pRequest->reqId); int code = tqScanData(pTq, pHandle, &dataRsp, pOffset); @@ -160,7 +161,7 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle, taosWUnLockLatch(&pTq->lock); } - setRequestVersion(&dataRsp.reqOffset, pOffset->version); +// setRequestVersion(&dataRsp.reqOffset, pOffset->version); code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&dataRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId); end : { @@ -181,8 +182,8 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, SWalCkHead* pCkHead = NULL; SMqMetaRsp metaRsp = {0}; STaosxRsp taosxRsp = {0}; - tqInitTaosxRsp(&taosxRsp, pRequest); - taosxRsp.reqOffset.type = offset->type; // store origin type for getting offset in tmq_get_vgroup_offset + tqInitTaosxRsp(&taosxRsp, *offset); +// taosxRsp.reqOffset.type = offset->type; // store origin type for getting offset in tmq_get_vgroup_offset if (offset->type != TMQ_OFFSET__LOG) { if (tqScanTaosx(pTq, pHandle, &taosxRsp, &metaRsp, offset) < 0) { @@ -235,7 +236,7 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, if (tqFetchLog(pTq, pHandle, &fetchVer, &pCkHead, pRequest->reqId) < 0) { tqOffsetResetToLog(&taosxRsp.rspOffset, 
fetchVer); - setRequestVersion(&taosxRsp.reqOffset, offset->version); +// setRequestVersion(&taosxRsp.reqOffset, offset->version); code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId); goto end; } @@ -248,7 +249,7 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, if (pHead->msgType != TDMT_VND_SUBMIT) { if (totalRows > 0) { tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer); - setRequestVersion(&taosxRsp.reqOffset, offset->version); +// setRequestVersion(&taosxRsp.reqOffset, offset->version); code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId); goto end; } @@ -278,7 +279,7 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, if (totalRows >= 4096 || taosxRsp.createTableNum > 0) { tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer + 1); - setRequestVersion(&taosxRsp.reqOffset, offset->version); +// setRequestVersion(&taosxRsp.reqOffset, offset->version); code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, taosxRsp.createTableNum > 0 ? TMQ_MSG_TYPE__POLL_DATA_META_RSP : TMQ_MSG_TYPE__POLL_DATA_RSP, vgId); goto end; } else { @@ -295,15 +296,13 @@ end: } int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg) { - int32_t code = -1; - STqOffsetVal offset = {0}; STqOffsetVal reqOffset = pRequest->reqOffset; // 1. reset the offset if needed - if (IS_OFFSET_RESET_TYPE(reqOffset.type)) { + if (IS_OFFSET_RESET_TYPE(pRequest->reqOffset.type)) { // handle the reset offset cases, according to the consumer's choice. 
bool blockReturned = false; - code = extractResetOffsetVal(&offset, pTq, pHandle, pRequest, pMsg, &blockReturned); + int32_t code = extractResetOffsetVal(&reqOffset, pTq, pHandle, pRequest, pMsg, &blockReturned); if (code != 0) { return code; } @@ -312,20 +311,17 @@ int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequ if (blockReturned) { return 0; } - } else if(reqOffset.type != 0){ // use the consumer specified offset - // the offset value can not be monotonious increase?? - offset = reqOffset; - } else { + } else if(reqOffset.type == 0){ // use the consumer specified offset uError("req offset type is 0"); return TSDB_CODE_TMQ_INVALID_MSG; } // this is a normal subscribe requirement if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) { - return extractDataAndRspForNormalSubscribe(pTq, pHandle, pRequest, pMsg, &offset); + return extractDataAndRspForNormalSubscribe(pTq, pHandle, pRequest, pMsg, &reqOffset); } else { // todo handle the case where re-balance occurs. 
// for taosx - return extractDataAndRspForDbStbSubscribe(pTq, pHandle, pRequest, pMsg, &offset); + return extractDataAndRspForDbStbSubscribe(pTq, pHandle, pRequest, pMsg, &reqOffset); } } diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 04a76146c6..54b9576eb1 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -75,7 +75,10 @@ int32_t walNextValidMsg(SWalReader *pReader) { wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last index:%" PRId64 " commit index:%" PRId64 ", applied index:%" PRId64, pReader->pWal->cfg.vgId, fetchVer, lastVer, committedVer, appliedVer); - + if (fetchVer > appliedVer){ + terrno = TSDB_CODE_WAL_LOG_NOT_EXIST; + return -1; + } while (fetchVer <= appliedVer) { if (walFetchHeadNew(pReader, fetchVer) < 0) { return -1; @@ -97,7 +100,6 @@ int32_t walNextValidMsg(SWalReader *pReader) { } } - terrno = TSDB_CODE_WAL_LOG_NOT_EXIST; return -1; } diff --git a/tests/system-test/7-tmq/tmq_offset.py b/tests/system-test/7-tmq/tmq_offset.py index a39365c13b..33d36eda71 100644 --- a/tests/system-test/7-tmq/tmq_offset.py +++ b/tests/system-test/7-tmq/tmq_offset.py @@ -23,16 +23,16 @@ class TDTestCase: def run(self): tdSql.prepare() buildPath = tdCom.getBuildPath() - cmdStr1 = '%s/build/bin/taosBenchmark -i 10 -B 1 -t 100000 -n 100000 -y &'%(buildPath) + cmdStr1 = '%s/build/bin/taosBenchmark -i 50 -B 1 -t 1000 -n 100000 -y &'%(buildPath) tdLog.info(cmdStr1) os.system(cmdStr1) - time.sleep(20) + time.sleep(10) cmdStr2 = '%s/build/bin/tmq_offset_test &'%(buildPath) tdLog.info(cmdStr2) os.system(cmdStr2) - time.sleep(30) + time.sleep(20) os.system("kill -9 `pgrep taosBenchmark`") result = os.system("kill -9 `pgrep tmq_offset_test`") diff --git a/utils/test/c/tmq_offset_test.c b/utils/test/c/tmq_offset_test.c index 03f710ae16..18931e2548 100644 --- a/utils/test/c/tmq_offset_test.c +++ b/utils/test/c/tmq_offset_test.c @@ -190,6 +190,16 @@ void test_offset(TAOS* pConn){ ASSERT(0); } + for(int i = 0; 
i < numOfAssign; i++){ + int64_t position = tmq_position(tmq, "tp", pAssign[i].vgId); + if(position == 0) continue; + + printf("position = %lld\n", position); + tmq_commit_offset_sync(tmq, "tp", pAssign[i].vgId, position); + int64_t committed = tmq_committed(tmq, "tp", pAssign[i].vgId); + ASSERT(position == committed); + } + tmq_offset_seek(tmq, "tp", pAssign[0].vgId, pAssign[0].currentOffset); tmq_offset_seek(tmq, "tp", pAssign[1].vgId, pAssign[1].currentOffset); From ab9ad7d088321d8ef47b9ba4c053b21aa06ab5e9 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 10 Aug 2023 17:13:10 +0800 Subject: [PATCH 047/147] fix:offset error in tmq & add test cases --- tests/system-test/7-tmq/tmqMaxTopic.py | 2 +- utils/test/c/tmq_offset_test.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/system-test/7-tmq/tmqMaxTopic.py b/tests/system-test/7-tmq/tmqMaxTopic.py index 62bc9ccb4e..05b699ca00 100644 --- a/tests/system-test/7-tmq/tmqMaxTopic.py +++ b/tests/system-test/7-tmq/tmqMaxTopic.py @@ -36,7 +36,7 @@ class TDTestCase: # tdDnodes[1].cfgDir cfgFile = f"%s/taos.cfg"%(cfgDir) - shellCmd = 'echo "tmqMaxTopicNum %d" >> %s'%(tmqMaxTopicNum, cfgFile) + shellCmd = 'echo tmqMaxTopicNum %d >> %s'%(tmqMaxTopicNum, cfgFile) tdLog.info(" shell cmd: %s"%(shellCmd)) os.system(shellCmd) tdDnodes.stoptaosd(1) diff --git a/utils/test/c/tmq_offset_test.c b/utils/test/c/tmq_offset_test.c index 18931e2548..cecbd615d1 100644 --- a/utils/test/c/tmq_offset_test.c +++ b/utils/test/c/tmq_offset_test.c @@ -194,7 +194,7 @@ void test_offset(TAOS* pConn){ int64_t position = tmq_position(tmq, "tp", pAssign[i].vgId); if(position == 0) continue; - printf("position = %lld\n", position); + printf("position = %d\n", (int)position); tmq_commit_offset_sync(tmq, "tp", pAssign[i].vgId, position); int64_t committed = tmq_committed(tmq, "tp", pAssign[i].vgId); ASSERT(position == committed); From b2e615d4e70a78a8ef80bf4cf3e4ce7af6c9381e Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: 
Thu, 10 Aug 2023 17:30:01 +0800 Subject: [PATCH 048/147] enhance: tag scan cursor based block --- include/libs/executor/storageapi.h | 14 +- source/dnode/vnode/src/inc/vnodeInt.h | 2 +- source/dnode/vnode/src/meta/metaQuery.c | 12 +- source/dnode/vnode/src/vnd/vnodeInitApi.c | 4 + source/libs/executor/inc/executil.h | 2 + source/libs/executor/inc/executorInt.h | 4 + source/libs/executor/inc/operator.h | 2 +- source/libs/executor/src/executil.c | 4 +- source/libs/executor/src/operator.c | 2 +- source/libs/executor/src/scanoperator.c | 271 +++++++++++++++++++++- 10 files changed, 298 insertions(+), 19 deletions(-) diff --git a/include/libs/executor/storageapi.h b/include/libs/executor/storageapi.h index 773f373a2d..724d6638db 100644 --- a/include/libs/executor/storageapi.h +++ b/include/libs/executor/storageapi.h @@ -98,6 +98,16 @@ typedef struct SMTbCursor { int8_t paused; } SMTbCursor; +typedef struct SMCtbCursor { + SMeta *pMeta; + void *pCur; + tb_uid_t suid; + void *pKey; + void *pVal; + int kLen; + int vLen; +} SMCtbCursor; + typedef struct SRowBuffPos { void* pRowBuff; void* pKey; @@ -278,13 +288,15 @@ typedef struct SStoreMeta { void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables, int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) & // metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta); - int64_t (*getNumOfRowsInMem)(void* pVnode); /** int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list); int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg); int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list); */ + SMCtbCursor* (*openCtbCursor)(void *pVnode, tb_uid_t uid, int lock); + void (*closeCtbCursor)(SMCtbCursor *pCtbCur, int lock); + tb_uid_t (*ctbCursorNext)(SMCtbCursor* pCur); } SStoreMeta; typedef struct SStoreMetaReader { diff --git a/source/dnode/vnode/src/inc/vnodeInt.h 
b/source/dnode/vnode/src/inc/vnodeInt.h index cd7704940b..e3b2d3e41e 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -167,7 +167,7 @@ int metaAddIndexToSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq); int metaDropIndexFromSTable(SMeta* pMeta, int64_t version, SDropIndexReq* pReq); int64_t metaGetTimeSeriesNum(SMeta* pMeta); -SMCtbCursor* metaOpenCtbCursor(SMeta* pMeta, tb_uid_t uid, int lock); +SMCtbCursor* metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock); void metaCloseCtbCursor(SMCtbCursor* pCtbCur, int lock); tb_uid_t metaCtbCursorNext(SMCtbCursor* pCtbCur); SMStbCursor* metaOpenStbCursor(SMeta* pMeta, tb_uid_t uid); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index c26bb45c2b..31c7bc8500 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -408,17 +408,9 @@ _err: return NULL; } -struct SMCtbCursor { - SMeta *pMeta; - TBC *pCur; - tb_uid_t suid; - void *pKey; - void *pVal; - int kLen; - int vLen; -}; -SMCtbCursor *metaOpenCtbCursor(SMeta *pMeta, tb_uid_t uid, int lock) { +SMCtbCursor *metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock) { + SMeta* pMeta = ((SVnode*)pVnode)->pMeta; SMCtbCursor *pCtbCur = NULL; SCtbIdxKey ctbIdxKey; int ret = 0; diff --git a/source/dnode/vnode/src/vnd/vnodeInitApi.c b/source/dnode/vnode/src/vnd/vnodeInitApi.c index 5c8d563d73..dca8dd271c 100644 --- a/source/dnode/vnode/src/vnd/vnodeInitApi.c +++ b/source/dnode/vnode/src/vnd/vnodeInitApi.c @@ -96,6 +96,10 @@ void initMetadataAPI(SStoreMeta* pMeta) { pMeta->metaGetCachedTbGroup = metaGetCachedTbGroup; pMeta->metaPutTbGroupToCache = metaPutTbGroupToCache; + + pMeta->openCtbCursor = metaOpenCtbCursor; + pMeta->closeCtbCursor = metaCloseCtbCursor; + pMeta->ctbCursorNext = metaCtbCursorNext; } void initTqAPI(SStoreTqReader* pTq) { diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h 
index 33c9d845b9..f273f63770 100644 --- a/source/libs/executor/inc/executil.h +++ b/source/libs/executor/inc/executil.h @@ -190,4 +190,6 @@ void printDataBlock(SSDataBlock* pBlock, const char* flag); void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order); void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery); +SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, + SStorageAPI* pStorageAPI); #endif // TDENGINE_EXECUTIL_H diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index fbca5e29f9..cadf367481 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -259,6 +259,10 @@ typedef struct STagScanInfo { SLimitNode* pSlimit; SReadHandle readHandle; STableListInfo* pTableListInfo; + uint64_t suid; + void* pCtbCursor; + SNode* pTagCond; + SNode* pTagIndexCond; } STagScanInfo; typedef enum EStreamScanMode { diff --git a/source/libs/executor/inc/operator.h b/source/libs/executor/inc/operator.h index e6c3405d7f..38cefc1cc5 100644 --- a/source/libs/executor/inc/operator.h +++ b/source/libs/executor/inc/operator.h @@ -81,7 +81,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo); -SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo); +SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, STableListInfo* pTableListInfo, SNode* pTagCond, SNode*pTagIndexCond, SExecTaskInfo* pTaskInfo); SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode, const char* pUser, SExecTaskInfo* 
pTaskInfo); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index aa0c7945b0..5bb8f8a38b 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -47,8 +47,6 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* p static int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo, uint8_t* digest, const char* idstr, SStorageAPI* pStorageAPI); -static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, - SStorageAPI* pStorageAPI); static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; } static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->offset; } @@ -846,7 +844,7 @@ static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, S return -1; } -static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, +SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList, void* pVnode, SStorageAPI* pStorageAPI) { SSDataBlock* pResBlock = createDataBlock(); if (pResBlock == NULL) { diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 8ddcc8fd15..0fc1b77b73 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -380,7 +380,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR return NULL; } - pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTaskInfo); + pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN == type) { SBlockDistScanPhysiNode* pBlockNode = (SBlockDistScanPhysiNode*)pPhyNode; 
STableListInfo* pTableListInfo = tableListCreate(); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 71b0747be8..24ed717c8a 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2688,6 +2688,271 @@ static void doTagScanOneTable(SOperatorInfo* pOperator, const SSDataBlock* pRes, } } +static void tagScanFreeUidTag(void* p) { + STUidTagInfo* pInfo = p; + if (pInfo->pTagVal != NULL) { + taosMemoryFree(pInfo->pTagVal); + } +} + +static int32_t tagScanCreateResultData(SDataType* pType, int32_t numOfRows, SScalarParam* pParam) { + SColumnInfoData* pColumnData = taosMemoryCalloc(1, sizeof(SColumnInfoData)); + if (pColumnData == NULL) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + return terrno; + } + + pColumnData->info.type = pType->type; + pColumnData->info.bytes = pType->bytes; + pColumnData->info.scale = pType->scale; + pColumnData->info.precision = pType->precision; + + int32_t code = colInfoDataEnsureCapacity(pColumnData, numOfRows, true); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + taosMemoryFree(pColumnData); + return terrno; + } + + pParam->columnData = pColumnData; + pParam->colAlloced = true; + return TSDB_CODE_SUCCESS; +} + +typedef struct STagScanFilterContext { + SHashObj* colHash; + int32_t index; + SArray* cInfoList; +} STagScanFilterContext; + +static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) { + SColumnNode* pSColumnNode = NULL; + if (QUERY_NODE_COLUMN == nodeType((*pNode))) { + pSColumnNode = *(SColumnNode**)pNode; + } else if (QUERY_NODE_FUNCTION == nodeType((*pNode))) { + SFunctionNode* pFuncNode = *(SFunctionNode**)(pNode); + if (pFuncNode->funcType == FUNCTION_TYPE_TBNAME) { + pSColumnNode = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == pSColumnNode) { + return DEAL_RES_ERROR; + } + pSColumnNode->colId = -1; + pSColumnNode->colType = COLUMN_TYPE_TBNAME; + pSColumnNode->node.resType.type = 
TSDB_DATA_TYPE_VARCHAR; + pSColumnNode->node.resType.bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE; + nodesDestroyNode(*pNode); + *pNode = (SNode*)pSColumnNode; + } else { + return DEAL_RES_CONTINUE; + } + } else { + return DEAL_RES_CONTINUE; + } + + STagScanFilterContext* pCtx = (STagScanFilterContext*)pContext; + void* data = taosHashGet(pCtx->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId)); + if (!data) { + taosHashPut(pCtx->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId), pNode, sizeof((*pNode))); + pSColumnNode->slotId = pCtx->index++; + SColumnInfo cInfo = {.colId = pSColumnNode->colId, + .type = pSColumnNode->node.resType.type, + .bytes = pSColumnNode->node.resType.bytes}; + taosArrayPush(pCtx->cInfoList, &cInfo); + } else { + SColumnNode* col = *(SColumnNode**)data; + pSColumnNode->slotId = col->slotId; + } + + return DEAL_RES_CONTINUE; +} + + +static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aUidTagIdxs, void* pVnode, SStorageAPI* pAPI) { + int32_t code = 0; + int32_t numOfTables = taosArrayGetSize(aUidTags); + STagScanFilterContext ctx = {0}; + ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); + ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); + + nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&ctx); + + SSDataBlock* pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, aUidTags, pVnode, pAPI); + if (pResBlock == NULL) { + + } + + SArray* pBlockList = taosArrayInit(1, POINTER_BYTES); + taosArrayPush(pBlockList, &pResBlock); + SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; + + SScalarParam output = {0}; + code = tagScanCreateResultData(&type, numOfTables, &output); + if (code != TSDB_CODE_SUCCESS) { + + } + + code = scalarCalculate(pTagCond, pBlockList, &output); + if (code != TSDB_CODE_SUCCESS) { + } + + bool* result = (bool*)output.columnData->pData; + for (int32_t i = 0 ; i < 
numOfTables; ++i) { + if (result[i]) { + taosArrayPush(aUidTagIdxs, &i); + } + } + + taosHashCleanup(ctx.colHash); + taosArrayDestroy(ctx.cInfoList); + blockDataDestroy(pResBlock); + taosArrayDestroy(pBlockList); + colDataDestroy(output.columnData); + taosMemoryFreeClear(output.columnData); +} + +static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo* pExprInfo, SColumnInfoData* pColInfo, int rowIndex, const SStorageAPI* pAPI, void* pVnode) { + if (fmIsScanPseudoColumnFunc(pExprInfo->pExpr->_function.functionId)) { // tbname + char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; + STR_TO_VARSTR(str, "zsl"); + // if (pUidTagInfo->name != NULL) { + // STR_TO_VARSTR(str, pUidTagInfo->name); + // } else { // name is not retrieved during filter + // pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); + // } + + colDataSetVal(pColInfo, rowIndex, str, false); + } else { + STagVal tagVal = {0}; + tagVal.cid = pExprInfo->base.pParam[0].pCol->colId; + if (pUidTagInfo->pTagVal == NULL) { + colDataSetNULL(pColInfo, rowIndex); + } else { + const char* p = pAPI->metaFn.extractTagVal(pUidTagInfo->pTagVal, pColInfo->info.type, &tagVal); + + if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) { + colDataSetNULL(pColInfo, rowIndex); + } else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) { + colDataSetVal(pColInfo, rowIndex, p, false); + } else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) { + char* tmp = taosMemoryMalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1); + varDataSetLen(tmp, tagVal.nData); + memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData); + colDataSetVal(pColInfo, rowIndex, tmp, false); + taosMemoryFree(tmp); + } else { + colDataSetVal(pColInfo, rowIndex, (const char*)&tagVal.i64, false); + } + } + } +} + +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aUidTagIdxs, + SStorageAPI* pAPI) { + STagScanInfo* pInfo = 
pOperator->info; + SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; + + for (int i = 0; i < taosArrayGetSize(aUidTagIdxs); ++i) { + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, *(int32_t*)taosArrayGet(aUidTagIdxs, i)); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); + } + } + return 0; +} + +#if 0 +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, + SStorageAPI* pAPI) { + STagScanInfo* pInfo = pOperator->info; + SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; + + int32_t nTbls = taosArrayGetSize(aUidTags); + for (int i = 0; i < nTbls; ++i) { + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, i); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + + // refactor later + if (fmIsScanPseudoColumnFunc(pExprInfo[j].pExpr->_function.functionId)) { + char str[512]; + + STR_TO_VARSTR(str, "zsl"); + colDataSetVal(pDst, (i), str, false); + } else { // it is a tag value + STagVal val = {0}; + val.cid = pExprInfo[j].base.pParam[0].pCol->colId; + const char* p = pAPI->metaFn.extractTagVal(pUidTagInfo->pTagVal, pDst->info.type, &val); + + char* data = NULL; + if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) { + data = tTagValToData((const STagVal*)p, false); + } else { + data = (char*)p; + } + colDataSetVal(pDst, i, data, + (data == NULL) || (pDst->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data))); + + if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL && IS_VAR_DATA_TYPE(((const STagVal*)p)->type) && + data != NULL) { + taosMemoryFree(data); + } + } + } + } + return 0; +} +#endif + + +static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { + if 
(pOperator->status == OP_EXEC_DONE) { + return NULL; + } + SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; + SStorageAPI* pAPI = &pTaskInfo->storageAPI; + + STagScanInfo* pInfo = pOperator->info; + SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; + SSDataBlock* pRes = pInfo->pRes; + blockDataCleanup(pRes); + int32_t count = 0; + + if (pInfo->pCtbCursor == NULL) { + pInfo->pCtbCursor = pAPI->metaFn.openCtbCursor(pInfo->readHandle.vnode, pInfo->suid, 1); + } + SArray* aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); + SArray* aUidTagIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + while (1) { + while (count < pOperator->resultInfo.capacity) { + SMCtbCursor* pCur = pInfo->pCtbCursor; + tb_uid_t uid = pAPI->metaFn.ctbCursorNext(pInfo->pCtbCursor); + if (uid == 0) { + break; + } + STUidTagInfo info = {.uid = uid, .pTagVal = pCur->pVal}; + info.pTagVal = taosMemoryMalloc(pCur->vLen); + memcpy(info.pTagVal, pCur->pVal, pCur->vLen); + taosArrayPush(aUidTags, &info); + } + + int32_t numTables = taosArrayGetSize(aUidTags); + if (numTables != 0 && pInfo->pTagCond != NULL) { + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aUidTagIdxs, pAPI); + } + tagScanFillResultBlock(pOperator, pRes, aUidTags, aUidTagIdxs, pAPI); + if (taosArrayGetSize(aUidTagIdxs) != 0) { + break; + } + taosArrayClearEx(aUidTags, tagScanFreeUidTag); + taosArrayClear(aUidTagIdxs); + } + taosArrayDestroy(aUidTagIdxs); + taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); + pOperator->resultInfo.totalRows += count; + return (pRes->info.rows == 0) ? 
NULL : pInfo->pRes; +} + static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; @@ -2753,7 +3018,7 @@ static void destroyTagScanOperatorInfo(void* param) { } SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, - STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) { + STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, SExecTaskInfo* pTaskInfo) { STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -2774,7 +3039,8 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi if (code != TSDB_CODE_SUCCESS) { goto _error; } - + pInfo->pTagCond = pTagCond; + pInfo->pTagIndexCond = pTagIndexCond; pInfo->pTableListInfo = pTableListInfo; pInfo->pRes = createDataBlockFromDescNode(pDescNode); pInfo->readHandle = *pReadHandle; @@ -2789,6 +3055,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doTagScan, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); + pInfo->suid = pPhyNode->suid; return pOperator; _error: From 20f5e2af5b5f6742466e82611f2e54278af6d776 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Thu, 10 Aug 2023 17:40:54 +0800 Subject: [PATCH 049/147] continue coding and save work --- source/libs/executor/src/scanoperator.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 24ed717c8a..42c488edbe 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2913,7 +2913,6 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { SStorageAPI* pAPI = &pTaskInfo->storageAPI; STagScanInfo* pInfo = pOperator->info; - SExprInfo* pExprInfo = 
&pOperator->exprSupp.pExprInfo[0]; SSDataBlock* pRes = pInfo->pRes; blockDataCleanup(pRes); int32_t count = 0; @@ -2941,6 +2940,8 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aUidTagIdxs, pAPI); } tagScanFillResultBlock(pOperator, pRes, aUidTags, aUidTagIdxs, pAPI); + count = taosArrayGetSize(aUidTagIdxs); + if (taosArrayGetSize(aUidTagIdxs) != 0) { break; } From 78bc18c71762f11ac219b4d63d998d6b78c03a1c Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 10 Aug 2023 18:17:42 +0800 Subject: [PATCH 050/147] update_release_version --- cmake/cmake.version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/cmake.version b/cmake/cmake.version index 86afe68188..d5ada35dc3 100644 --- a/cmake/cmake.version +++ b/cmake/cmake.version @@ -2,7 +2,7 @@ IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "3.1.0.1.alpha") + SET(TD_VER_NUMBER "3.1.0.2.alpha") ENDIF () IF (DEFINED VERCOMPATIBLE) From 6155c80729211a209e3feb843b70ac15ce923716 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Thu, 10 Aug 2023 18:38:01 +0800 Subject: [PATCH 051/147] fix:offset error in tmq & add test cases --- tests/parallel_test/cases.task | 2 +- tests/system-test/7-tmq/tmqParamsTest.py | 4 ++-- tests/system-test/7-tmq/tmq_offset.py | 2 +- utils/test/c/tmq_offset_test.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 6b9a577fd5..dd28154176 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -126,7 +126,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_taosx.py -,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmq_offset.py +,,n,system-test,python3 ./test.py 
-f 7-tmq/tmq_offset.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/raw_block_interface_test.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqSubscribeStb-r3.py -N 5 diff --git a/tests/system-test/7-tmq/tmqParamsTest.py b/tests/system-test/7-tmq/tmqParamsTest.py index 0fc7a6cdd9..ff7c70bcd2 100644 --- a/tests/system-test/7-tmq/tmqParamsTest.py +++ b/tests/system-test/7-tmq/tmqParamsTest.py @@ -131,7 +131,7 @@ class TDTestCase: if snapshot_value == "true": if offset_value != "earliest" and offset_value != "": if offset_value == "latest": - offset_value_list = list(map(lambda x: int(x[-2].replace("wal:", "").replace(offset_value, "0")), subscription_info)) + offset_value_list = list(map(lambda x: int(x[-2].replace("wal:", "").replace("earliest", "0").replace("latest", "0").replace(offset_value, "0")), subscription_info)) tdSql.checkEqual(sum(offset_value_list) >= 0, True) rows_value_list = list(map(lambda x: int(x[-1]), subscription_info)) tdSql.checkEqual(sum(rows_value_list), expected_res) @@ -154,7 +154,7 @@ class TDTestCase: tdSql.checkEqual(rows_value_list, [None]*len(subscription_info)) else: if offset_value != "none": - offset_value_list = list(map(lambda x: int(x[-2].replace("wal:", "").replace(offset_value, "0")), subscription_info)) + offset_value_list = list(map(lambda x: int(x[-2].replace("wal:", "").replace("earliest", "0").replace("latest", "0").replace(offset_value, "0")), subscription_info)) tdSql.checkEqual(sum(offset_value_list) >= 0, True) rows_value_list = list(map(lambda x: int(x[-1]), subscription_info)) tdSql.checkEqual(sum(rows_value_list), expected_res) diff --git a/tests/system-test/7-tmq/tmq_offset.py b/tests/system-test/7-tmq/tmq_offset.py index 33d36eda71..6453f452c6 100644 --- a/tests/system-test/7-tmq/tmq_offset.py +++ b/tests/system-test/7-tmq/tmq_offset.py @@ -26,7 +26,7 @@ class TDTestCase: cmdStr1 = '%s/build/bin/taosBenchmark -i 50 
-B 1 -t 1000 -n 100000 -y &'%(buildPath) tdLog.info(cmdStr1) os.system(cmdStr1) - time.sleep(10) + time.sleep(15) cmdStr2 = '%s/build/bin/tmq_offset_test &'%(buildPath) tdLog.info(cmdStr2) diff --git a/utils/test/c/tmq_offset_test.c b/utils/test/c/tmq_offset_test.c index cecbd615d1..6be9b38979 100644 --- a/utils/test/c/tmq_offset_test.c +++ b/utils/test/c/tmq_offset_test.c @@ -266,7 +266,7 @@ void test_ts3756(TAOS* pConn){ int32_t numOfAssign = 0; while (1) { - printf("start to poll\n"); +// printf("start to poll\n"); pRes = tmq_consumer_poll(tmq, timeout); if (pRes) { From 7085d6bc11d7c8fb7ef18258273bccdee5817337 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 10 Aug 2023 18:49:17 +0800 Subject: [PATCH 052/147] mxml: disable shared lib --- cmake/mxml_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 87b126d8d3..7377f81c33 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,7 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --enable-shared=no #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install From 4513acfee925eaebf27b1486561afe3afab0ffe4 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 09:54:54 +0800 Subject: [PATCH 053/147] cos: use static libs for mxml, apr, apu, curl --- cmake/apr-util_CMakeLists.txt.in | 2 +- cmake/apr_CMakeLists.txt.in | 2 +- cmake/curl_CMakeLists.txt.in | 2 +- cmake/mxml_CMakeLists.txt.in | 2 +- contrib/CMakeLists.txt | 7 ++++--- source/dnode/vnode/CMakeLists.txt | 2 +- tests/parallel_test/container_build.sh | 4 ++-- 7 files changed, 11 insertions(+), 10 deletions(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index 6172be380e..d98a381005 100644 --- 
a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ --with-apr=$ENV{HOME}/.cos-local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/apr_CMakeLists.txt.in b/cmake/apr_CMakeLists.txt.in index 538b45a7f9..18c4eb62a1 100644 --- a/cmake/apr_CMakeLists.txt.in +++ b/cmake/apr_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(apr-1 UPDATE_DISCONNECTED TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local/ + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --enable-shared=no #CONFIGURE_COMMAND ./configure BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 1d9d028848..5f1efc1e5a 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --without-ssl + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/cmake/mxml_CMakeLists.txt.in b/cmake/mxml_CMakeLists.txt.in index 7377f81c33..9dcb5df665 100644 --- a/cmake/mxml_CMakeLists.txt.in +++ b/cmake/mxml_CMakeLists.txt.in @@ -6,7 +6,7 @@ ExternalProject_Add(mxml #BINARY_DIR "" BUILD_IN_SOURCE TRUE #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local --enable-shared=no + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --enable-shared=no #CONFIGURE_COMMAND ./configure 
BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index d20b205e69..452192a288 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -10,7 +10,7 @@ set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) if(${BUILD_WITH_COS}) - file(MAKE_DIRECTORY $ENV{HOME}/.cos-local/) + file(MAKE_DIRECTORY $ENV{HOME}/.cos-local.1/) cat("${TD_SUPPORT_DIR}/mxml_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) cat("${TD_SUPPORT_DIR}/apr_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) cat("${TD_SUPPORT_DIR}/curl_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) @@ -389,10 +389,11 @@ endif() # cos if(${BUILD_WITH_COS}) if(NOT ${TD_WINDOWS}) - set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local) + set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) - INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local/include) + INCLUDE_DIRECTORIES($ENV{HOME}/.cos-local.1/include) + MESSAGE("$ENV{HOME}/.cos-local.1/include") set(CMAKE_BUILD_TYPE debug) set(ORIG_CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index c036fbc54a..684134c2d6 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -196,7 +196,7 @@ include_directories (${APR_INCLUDE_DIR}) target_include_directories( vnode PUBLIC "${TD_SOURCE_DIR}/contrib/cos-c-sdk-v5/cos_c_sdk" - PUBLIC "$ENV{HOME}/.cos-local/include" + PUBLIC "$ENV{HOME}/.cos-local.1/include" ) if(${BUILD_TEST}) diff --git a/tests/parallel_test/container_build.sh b/tests/parallel_test/container_build.sh index 62254984a9..f5e426057e 100755 --- a/tests/parallel_test/container_build.sh +++ b/tests/parallel_test/container_build.sh @@ -60,7 +60,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v 
/root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/.cos-local:/root/.cos-local \ + -v /root/.cos-local.1:/root/.cos-local.1 \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ @@ -89,7 +89,7 @@ docker run \ -v /root/.cargo/git:/root/.cargo/git \ -v /root/go/pkg/mod:/root/go/pkg/mod \ -v /root/.cache/go-build:/root/.cache/go-build \ - -v /root/.cos-local:/root/.cos-local \ + -v /root/.cos-local.1:/root/.cos-local.1 \ -v ${REP_REAL_PATH}/enterprise/src/plugins/taosx/target:${REP_DIR}/enterprise/src/plugins/taosx/target \ -v ${REP_REAL_PATH}/community/tools/taosws-rs/target:${REP_DIR}/community/tools/taosws-rs/target \ -v ${REP_REAL_PATH}/community/contrib/cJson/:${REP_DIR}/community/contrib/cJson \ From 104ead6783d3f5225a8d53c8df07de46c2ac2f9b Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 10:06:42 +0800 Subject: [PATCH 054/147] apu: fix apr location --- cmake/apr-util_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/apr-util_CMakeLists.txt.in b/cmake/apr-util_CMakeLists.txt.in index d98a381005..5a68020dd7 100644 --- a/cmake/apr-util_CMakeLists.txt.in +++ b/cmake/apr-util_CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(aprutil-1 BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1/ --with-apr=$ENV{HOME}/.cos-local.1 #CONFIGURE_COMMAND ./configure --with-apr=/usr/local/apr BUILD_COMMAND make INSTALL_COMMAND make install From 1afbde7d2f1437ceda703d11db4b4cce60ca1309 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 11 Aug 2023 10:06:37 +0800 Subject: [PATCH 
055/147] fix: add time window boundary check before interpolation --- source/libs/executor/src/timesliceoperator.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index b019985645..c9824ff2d6 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -848,6 +848,10 @@ static void doHandleTimeslice(SOperatorInfo* pOperator, SSDataBlock* pBlock) { bool ignoreNull = getIgoreNullRes(pSup); int32_t order = TSDB_ORDER_ASC; + if (checkWindowBoundReached(pSliceInfo)) { + return; + } + int32_t code = initKeeperInfo(pSliceInfo, pBlock, &pOperator->exprSupp); if (code != TSDB_CODE_SUCCESS) { T_LONG_JMP(pTaskInfo->env, code); From b6e0e076679007c2fa4343a0e58430eb00f26bda Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 11 Aug 2023 10:07:18 +0800 Subject: [PATCH 056/147] add test case --- tests/system-test/2-query/interp.py | 39 +++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 986c63839b..c2eb7bee2e 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -20,6 +20,7 @@ class TDTestCase: tbname = "tb" tbname1 = "tb1" tbname2 = "tb2" + tbname3 = "tb3" stbname = "stb" ctbname1 = "ctb1" ctbname2 = "ctb2" @@ -5607,6 +5608,44 @@ class TDTestCase: tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:06') fill(linear)") tdSql.checkRows(0) + #### TS-3799 #### + + tdSql.execute( + f'''create table if not exists {dbname}.{tbname3} (ts timestamp, c0 double)''' + ) + + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:51.000000000', 4.233947800000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:52.000000000', 3.606781000000000)") + tdSql.execute(f"insert into 
{dbname}.{tbname3} values ('2023-08-06 23:59:52.500000000', 3.162353500000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:53.000000000', 3.162292500000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:53.500000000', 4.998230000000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:54.400000000', 8.800414999999999)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:54.900000000', 8.853271500000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:55.900000000', 7.507751500000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:56.400000000', 7.510681000000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:56.900000000', 7.841614000000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:57.900000000', 8.153809000000001)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:58.500000000', 6.866455000000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-06 23:59:59.000000000', 6.869140600000000)") + tdSql.execute(f"insert into {dbname}.{tbname3} values ('2023-08-07 00:00:00.000000000', 0.261475000000001)") + + tdSql.query(f"select _irowts, interp(c0) from {dbname}.{tbname3} range('2023-08-06 23:59:00','2023-08-06 23:59:59') every(1m) fill(next)") + tdSql.checkRows(1); + tdSql.checkData(0, 0, '2023-08-06 23:59:00') + tdSql.checkData(0, 1, 4.233947800000000) + + tdSql.query(f"select _irowts, interp(c0) from {dbname}.{tbname3} range('2023-08-06 23:59:00','2023-08-06 23:59:59') every(1m) fill(value, 1)") + tdSql.checkRows(1); + tdSql.checkData(0, 0, '2023-08-06 23:59:00') + tdSql.checkData(0, 1, 1) + + tdSql.query(f"select _irowts, interp(c0) from {dbname}.{tbname3} range('2023-08-06 23:59:00','2023-08-06 23:59:59') every(1m) fill(null)") + tdSql.checkRows(1); + tdSql.checkData(0, 0, 
'2023-08-06 23:59:00') + tdSql.checkData(0, 1, None) + + + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") From 7c39bc989083ce501dd6df5bd980b4edaab07057 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Fri, 11 Aug 2023 13:50:41 +0800 Subject: [PATCH 057/147] fix: some minor modifications --- source/libs/executor/inc/executorInt.h | 1 + source/libs/executor/src/scanoperator.c | 77 ++++++++++++++----------- 2 files changed, 44 insertions(+), 34 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index cadf367481..2b25feabb3 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -263,6 +263,7 @@ typedef struct STagScanInfo { void* pCtbCursor; SNode* pTagCond; SNode* pTagIndexCond; + SStorageAPI* pStorageAPI; } STagScanInfo; typedef enum EStreamScanMode { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 42c488edbe..5e0eb71c13 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2767,9 +2767,10 @@ static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) { } -static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aUidTagIdxs, void* pVnode, SStorageAPI* pAPI) { +static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode, SStorageAPI* pAPI) { int32_t code = 0; int32_t numOfTables = taosArrayGetSize(aUidTags); + STagScanFilterContext ctx = {0}; ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); @@ -2777,48 +2778,42 @@ static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aU nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&ctx); SSDataBlock* pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, 
aUidTags, pVnode, pAPI); - if (pResBlock == NULL) { - - } SArray* pBlockList = taosArrayInit(1, POINTER_BYTES); taosArrayPush(pBlockList, &pResBlock); SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)}; SScalarParam output = {0}; - code = tagScanCreateResultData(&type, numOfTables, &output); - if (code != TSDB_CODE_SUCCESS) { + tagScanCreateResultData(&type, numOfTables, &output); - } - - code = scalarCalculate(pTagCond, pBlockList, &output); - if (code != TSDB_CODE_SUCCESS) { - } + scalarCalculate(pTagCond, pBlockList, &output); bool* result = (bool*)output.columnData->pData; for (int32_t i = 0 ; i < numOfTables; ++i) { if (result[i]) { - taosArrayPush(aUidTagIdxs, &i); + taosArrayPush(aFilterIdxs, &i); } } - taosHashCleanup(ctx.colHash); - taosArrayDestroy(ctx.cInfoList); - blockDataDestroy(pResBlock); - taosArrayDestroy(pBlockList); colDataDestroy(output.columnData); taosMemoryFreeClear(output.columnData); + + blockDataDestroy(pResBlock); + taosArrayDestroy(pBlockList); + + taosHashCleanup(ctx.colHash); + taosArrayDestroy(ctx.cInfoList); } static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo* pExprInfo, SColumnInfoData* pColInfo, int rowIndex, const SStorageAPI* pAPI, void* pVnode) { if (fmIsScanPseudoColumnFunc(pExprInfo->pExpr->_function.functionId)) { // tbname char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0}; +// if (pUidTagInfo->name != NULL) { +// STR_TO_VARSTR(str, pUidTagInfo->name); +// } else { // name is not retrieved during filter +// pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); +// } STR_TO_VARSTR(str, "zsl"); - // if (pUidTagInfo->name != NULL) { - // STR_TO_VARSTR(str, pUidTagInfo->name); - // } else { // name is not retrieved during filter - // pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); - // } colDataSetVal(pColInfo, rowIndex, str, false); } else { @@ -2846,13 +2841,15 @@ static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo } 
} -static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aUidTagIdxs, +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aFilterIdxs, SStorageAPI* pAPI) { STagScanInfo* pInfo = pOperator->info; SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; - for (int i = 0; i < taosArrayGetSize(aUidTagIdxs); ++i) { - STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, *(int32_t*)taosArrayGet(aUidTagIdxs, i)); + size_t szTables = taosArrayGetSize(aFilterIdxs); + for (int i = 0; i < szTables; ++i) { + int32_t idx = *(int32_t*)taosArrayGet(aFilterIdxs, i); + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, idx); for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); @@ -2920,8 +2917,10 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { if (pInfo->pCtbCursor == NULL) { pInfo->pCtbCursor = pAPI->metaFn.openCtbCursor(pInfo->readHandle.vnode, pInfo->suid, 1); } + SArray* aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); - SArray* aUidTagIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + SArray* aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + while (1) { while (count < pOperator->resultInfo.capacity) { SMCtbCursor* pCur = pInfo->pCtbCursor; @@ -2936,20 +2935,26 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { } int32_t numTables = taosArrayGetSize(aUidTags); - if (numTables != 0 && pInfo->pTagCond != NULL) { - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aUidTagIdxs, pAPI); - } - tagScanFillResultBlock(pOperator, pRes, aUidTags, aUidTagIdxs, pAPI); - count = taosArrayGetSize(aUidTagIdxs); - - if 
(taosArrayGetSize(aUidTagIdxs) != 0) { + if (numTables == 0) { break; } + + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + + tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, pAPI); + count = taosArrayGetSize(aFilterIdxs); + + if (count != 0) { + break; + } + taosArrayClearEx(aUidTags, tagScanFreeUidTag); - taosArrayClear(aUidTagIdxs); + taosArrayClear(aFilterIdxs); } - taosArrayDestroy(aUidTagIdxs); + + taosArrayDestroy(aFilterIdxs); taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); + pOperator->resultInfo.totalRows += count; return (pRes->info.rows == 0) ? NULL : pInfo->pRes; } @@ -3012,6 +3017,9 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { static void destroyTagScanOperatorInfo(void* param) { STagScanInfo* pInfo = (STagScanInfo*)param; + if (pInfo->pCtbCursor != NULL) { + pInfo->pStorageAPI->metaFn.closeCtbCursor(pInfo->pCtbCursor, 1); + } pInfo->pRes = blockDataDestroy(pInfo->pRes); taosArrayDestroy(pInfo->matchInfo.pList); pInfo->pTableListInfo = tableListDestroy(pInfo->pTableListInfo); @@ -3043,6 +3051,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi pInfo->pTagCond = pTagCond; pInfo->pTagIndexCond = pTagIndexCond; pInfo->pTableListInfo = pTableListInfo; + pInfo->pStorageAPI = &pTaskInfo->storageAPI; pInfo->pRes = createDataBlockFromDescNode(pDescNode); pInfo->readHandle = *pReadHandle; pInfo->curPos = 0; From 1c7f854a719b590f4aa5d201d2543681d1e28975 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Fri, 11 Aug 2023 14:47:28 +0800 Subject: [PATCH 058/147] enhance: add only meta ctb index to tag scan physi node --- include/libs/nodes/plannodes.h | 14 +++- source/libs/nodes/src/nodesCloneFuncs.c | 11 ++- source/libs/nodes/src/nodesCodeFuncs.c | 70 ++++++++++++++++++- source/libs/nodes/src/nodesMsgFuncs.c | 89 +++++++++++++++++++++++++ 4 files changed, 180 insertions(+), 4 deletions(-) diff --git a/include/libs/nodes/plannodes.h 
b/include/libs/nodes/plannodes.h index 063318332a..0830dc4918 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -334,7 +334,19 @@ typedef struct SScanPhysiNode { bool groupOrderScan; } SScanPhysiNode; -typedef SScanPhysiNode STagScanPhysiNode; +typedef struct STagScanPhysiNode { + // SScanPhysiNode scan; //TODO? + SPhysiNode node; + SNodeList* pScanCols; + SNodeList* pScanPseudoCols; + uint64_t uid; // unique id of the table + uint64_t suid; + int8_t tableType; + SName tableName; + bool groupOrderScan; + bool onlyMetaCtbIdx; //no tbname, tag index not used. +} STagScanPhysiNode; + typedef SScanPhysiNode SBlockDistScanPhysiNode; typedef struct SLastRowScanPhysiNode { diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index f5eacf0bd5..965af41fa7 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -564,7 +564,16 @@ static int32_t physiScanCopy(const SScanPhysiNode* pSrc, SScanPhysiNode* pDst) { } static int32_t physiTagScanCopy(const STagScanPhysiNode* pSrc, STagScanPhysiNode* pDst) { - return physiScanCopy(pSrc, pDst); + COPY_BASE_OBJECT_FIELD(node, physiNodeCopy); + CLONE_NODE_LIST_FIELD(pScanCols); + CLONE_NODE_LIST_FIELD(pScanPseudoCols); + COPY_SCALAR_FIELD(uid); + COPY_SCALAR_FIELD(suid); + COPY_SCALAR_FIELD(tableType); + COPY_OBJECT_FIELD(tableName, sizeof(SName)); + COPY_SCALAR_FIELD(groupOrderScan); + COPY_SCALAR_FIELD(onlyMetaCtbIdx); + return TSDB_CODE_SUCCESS; } static int32_t physiTableScanCopy(const STableScanPhysiNode* pSrc, STableScanPhysiNode* pDst) { diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index f25616065e..3540f8cb70 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1562,7 +1562,7 @@ static const char* jkScanPhysiPlanTableName = "TableName"; static const char* jkScanPhysiPlanGroupOrderScan = "GroupOrderScan"; static 
int32_t physiScanNodeToJson(const void* pObj, SJson* pJson) { - const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + const SScanPhysiNode* pNode = (const SScanPhysiNode*)pObj; int32_t code = physicPlanNodeToJson(pObj, pJson); if (TSDB_CODE_SUCCESS == code) { @@ -1591,7 +1591,7 @@ static int32_t physiScanNodeToJson(const void* pObj, SJson* pJson) { } static int32_t jsonToPhysiScanNode(const SJson* pJson, void* pObj) { - STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + SScanPhysiNode* pNode = (SScanPhysiNode*)pObj; int32_t code = jsonToPhysicPlanNode(pJson, pObj); if (TSDB_CODE_SUCCESS == code) { @@ -1619,6 +1619,70 @@ static int32_t jsonToPhysiScanNode(const SJson* pJson, void* pObj) { return code; } +static const char* jkTagScanPhysiOnlyMetaCtbIdx = "OnlyMetaCtbIdx"; + +static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) { + const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + + int32_t code = physicPlanNodeToJson(pObj, pJson); + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkScanPhysiPlanScanCols, pNode->pScanCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodeListToJson(pJson, jkScanPhysiPlanScanPseudoCols, pNode->pScanPseudoCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableId, pNode->uid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanSTableId, pNode->suid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableType, pNode->tableType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddObject(pJson, jkScanPhysiPlanTableName, nameToJson, &pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkScanPhysiPlanGroupOrderScan, pNode->groupOrderScan); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkTagScanPhysiOnlyMetaCtbIdx, pNode->onlyMetaCtbIdx); + } + return code; +} 
+ +static int32_t jsonToPhysiTagScanNode(const SJson* pJson, void* pObj) { + STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + + int32_t code = jsonToPhysicPlanNode(pJson, pObj); + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkScanPhysiPlanScanCols, &pNode->pScanCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = jsonToNodeList(pJson, jkScanPhysiPlanScanPseudoCols, &pNode->pScanPseudoCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanTableId, &pNode->uid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanSTableId, &pNode->suid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetTinyIntValue(pJson, jkScanPhysiPlanTableType, &pNode->tableType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonToObject(pJson, jkScanPhysiPlanTableName, jsonToName, &pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkScanPhysiPlanGroupOrderScan, &pNode->groupOrderScan); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkTagScanPhysiOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); + } + return code; +} + static const char* jkLastRowScanPhysiPlanGroupTags = "GroupTags"; static const char* jkLastRowScanPhysiPlanGroupSort = "GroupSort"; @@ -6590,6 +6654,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { case QUERY_NODE_LOGIC_PLAN: return logicPlanToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + return physiTableScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: return physiScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: @@ -6908,6 +6973,7 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) { case QUERY_NODE_LOGIC_PLAN: return jsonToLogicPlan(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + return jsonToPhysiTagScanNode(pJson, pObj); case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: case 
QUERY_NODE_PHYSICAL_PLAN_TABLE_COUNT_SCAN: return jsonToPhysiScanNode(pJson, pObj); diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 20e829766d..4d1120861d 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -2003,6 +2003,91 @@ static int32_t msgToPhysiScanNode(STlvDecoder* pDecoder, void* pObj) { return code; } +enum { + PHY_TAG_SCAN_CODE_BASE_NODE = 1, + PHY_TAG_SCAN_CODE_SCAN_COLS, + PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, + PHY_TAG_SCAN_CODE_BASE_UID, + PHY_TAG_SCAN_CODE_BASE_SUID, + PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, + PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, + PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, + PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX +}; + +static int32_t physiTagScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { + const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; + + int32_t code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_BASE_NODE, physiNodeToMsg, &pNode->node); + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_COLS, nodeListToMsg, pNode->pScanCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, nodeListToMsg, pNode->pScanPseudoCols); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_UID, pNode->uid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_SUID, pNode->suid); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeI8(pEncoder, PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, pNode->tableType); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, nameToMsg, &pNode->tableName); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, pNode->groupOrderScan); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeBool(pEncoder, 
PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX, pNode->onlyMetaCtbIdx); + } + return code; +} + +static int32_t msgToPhysiTagScanNode(STlvDecoder* pDecoder, void* pObj) { + STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; + + int32_t code = TSDB_CODE_SUCCESS; + STlv* pTlv = NULL; + tlvForEach(pDecoder, pTlv, code) { + switch (pTlv->type) { + case PHY_TAG_SCAN_CODE_BASE_NODE: + code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node); + break; + case PHY_TAG_SCAN_CODE_SCAN_COLS: + code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanCols); + break; + case PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS: + code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanPseudoCols); + break; + case PHY_TAG_SCAN_CODE_BASE_UID: + code = tlvDecodeU64(pTlv, &pNode->uid); + break; + case PHY_TAG_SCAN_CODE_BASE_SUID: + code = tlvDecodeU64(pTlv, &pNode->suid); + break; + case PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE: + code = tlvDecodeI8(pTlv, &pNode->tableType); + break; + case PHY_TAG_SCAN_CODE_BASE_TABLE_NAME: + code = tlvDecodeObjFromTlv(pTlv, msgToName, &pNode->tableName); + break; + case PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN: + code = tlvDecodeBool(pTlv, &pNode->groupOrderScan); + break; + case PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX: + code = tlvDecodeBool(pTlv, &pNode->onlyMetaCtbIdx); + break; + default: + break; + } + } + + return code; +} + enum { PHY_LAST_ROW_SCAN_CODE_SCAN = 1, PHY_LAST_ROW_SCAN_CODE_GROUP_TAGS, @@ -3726,6 +3811,8 @@ static int32_t specificNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { code = caseWhenNodeToMsg(pObj, pEncoder); break; case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + code = physiTagScanNodeToMsg(pObj, pEncoder); + break; case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: code = physiScanNodeToMsg(pObj, pEncoder); break; @@ -3869,6 +3956,8 @@ static int32_t msgToSpecificNode(STlvDecoder* pDecoder, void* pObj) { code = msgToCaseWhenNode(pDecoder, pObj); break; case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: + code = msgToPhysiTagScanNode(pDecoder, pObj); + break; case 
QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: code = msgToPhysiScanNode(pDecoder, pObj); break; From a0c62d215d36bc980aabe3ebb12ee32521db2c43 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Fri, 11 Aug 2023 14:54:43 +0800 Subject: [PATCH 059/147] enhance: tag scan only meta ctb idx backend modification --- source/libs/executor/src/operator.c | 19 ++++++++++--------- source/libs/executor/src/scanoperator.c | 9 ++++++--- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 0fc1b77b73..d0805a86e4 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -370,17 +370,18 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR STableCountScanPhysiNode* pTblCountScanNode = (STableCountScanPhysiNode*)pPhyNode; pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { - STagScanPhysiNode* pScanPhyNode = (STagScanPhysiNode*)pPhyNode; + STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); - int32_t code = createScanTableListInfo(pScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, - pTagIndexCond, pTaskInfo); - if (code != TSDB_CODE_SUCCESS) { - pTaskInfo->code = code; - qError("failed to getTableList, code: %s", tstrerror(code)); - return NULL; + if (!pTagScanPhyNode->onlyMetaCtbIdx) { + int32_t code = createScanTableListInfo(pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, + pTagIndexCond, pTaskInfo); + if (code != TSDB_CODE_SUCCESS) { + pTaskInfo->code = code; + qError("failed to getTableList, code: %s", tstrerror(code)); + return NULL; + } } - - pOperator = createTagScanOperatorInfo(pHandle, pScanPhyNode, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); + pOperator = createTagScanOperatorInfo(pHandle, pTagScanPhyNode, pTableListInfo, pTagCond, 
pTagIndexCond, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN == type) { SBlockDistScanPhysiNode* pBlockNode = (SBlockDistScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 5e0eb71c13..107ea14914 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -3048,10 +3048,13 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi if (code != TSDB_CODE_SUCCESS) { goto _error; } + pInfo->pTagCond = pTagCond; pInfo->pTagIndexCond = pTagIndexCond; - pInfo->pTableListInfo = pTableListInfo; + pInfo->suid = pPhyNode->suid; pInfo->pStorageAPI = &pTaskInfo->storageAPI; + + pInfo->pTableListInfo = pTableListInfo; pInfo->pRes = createDataBlockFromDescNode(pDescNode); pInfo->readHandle = *pReadHandle; pInfo->curPos = 0; @@ -3062,10 +3065,10 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi initResultSizeInfo(&pOperator->resultInfo, 4096); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); + __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? 
doTagScanFromCtbIdx : doTagScan; pOperator->fpSet = - createOperatorFpSet(optrDummyOpenFn, doTagScan, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); + createOperatorFpSet(optrDummyOpenFn, tagScanNextFn, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); - pInfo->suid = pPhyNode->suid; return pOperator; _error: From 8987553d9c8cc2c86f8d575ab3bc6431f9b399eb Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Fri, 11 Aug 2023 15:56:38 +0800 Subject: [PATCH 060/147] fix: remove ins_modules --- include/libs/nodes/nodes.h | 2 +- source/common/src/systable.c | 2 +- source/dnode/mnode/impl/src/mndShow.c | 2 ++ source/libs/nodes/src/nodesCodeFuncs.c | 2 ++ source/libs/nodes/src/nodesUtilFuncs.c | 4 ++-- source/libs/parser/src/parAstParser.c | 2 ++ source/libs/parser/src/parAuthenticator.c | 2 +- source/libs/parser/src/parTranslater.c | 4 +++- 8 files changed, 14 insertions(+), 6 deletions(-) diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h index 2319643b09..8eeeff4148 100644 --- a/include/libs/nodes/nodes.h +++ b/include/libs/nodes/nodes.h @@ -169,7 +169,7 @@ typedef enum ENodeType { QUERY_NODE_REVOKE_STMT, QUERY_NODE_SHOW_DNODES_STMT, QUERY_NODE_SHOW_MNODES_STMT, - QUERY_NODE_SHOW_MODULES_STMT, +// QUERY_NODE_SHOW_MODULES_STMT, QUERY_NODE_SHOW_QNODES_STMT, QUERY_NODE_SHOW_SNODES_STMT, QUERY_NODE_SHOW_BNODES_STMT, diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 0940fcef6a..eb8042099f 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -314,7 +314,7 @@ static const SSysDbTableSchema userUserPrivilegesSchema[] = { static const SSysTableMeta infosMeta[] = { {TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true}, {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true}, - {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true}, + // {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true}, {TSDB_INS_TABLE_QNODES, qnodesSchema, 
tListLen(qnodesSchema), true}, {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema), true}, {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema), true}, diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index 44f4751700..7d842be7de 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -58,8 +58,10 @@ static int32_t convertToRetrieveType(char *name, int32_t len) { type = TSDB_MGMT_TABLE_DNODE; } else if (strncasecmp(name, TSDB_INS_TABLE_MNODES, len) == 0) { type = TSDB_MGMT_TABLE_MNODE; +/* } else if (strncasecmp(name, TSDB_INS_TABLE_MODULES, len) == 0) { type = TSDB_MGMT_TABLE_MODULE; +*/ } else if (strncasecmp(name, TSDB_INS_TABLE_QNODES, len) == 0) { type = TSDB_MGMT_TABLE_QNODE; } else if (strncasecmp(name, TSDB_INS_TABLE_SNODES, len) == 0) { diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index f25616065e..263d1b21ab 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -197,8 +197,10 @@ const char* nodesNodeName(ENodeType type) { return "ShowDnodesStmt"; case QUERY_NODE_SHOW_MNODES_STMT: return "ShowMnodesStmt"; +/* case QUERY_NODE_SHOW_MODULES_STMT: return "ShowModulesStmt"; +*/ case QUERY_NODE_SHOW_QNODES_STMT: return "ShowQnodesStmt"; case QUERY_NODE_SHOW_SNODES_STMT: diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index c8197721fb..75b63b9d34 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -406,7 +406,7 @@ SNode* nodesMakeNode(ENodeType type) { return makeNode(type, sizeof(SRevokeStmt)); case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: +// case QUERY_NODE_SHOW_MODULES_STMT: case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_SNODES_STMT: case QUERY_NODE_SHOW_BNODES_STMT: @@ -982,7 +982,7 @@ void 
nodesDestroyNode(SNode* pNode) { break; case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: +// case QUERY_NODE_SHOW_MODULES_STMT: case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_SNODES_STMT: case QUERY_NODE_SHOW_BNODES_STMT: diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index fdec9cba79..86b4566d37 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -690,8 +690,10 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { return collectMetaKeyFromShowDnodes(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_MNODES_STMT: return collectMetaKeyFromShowMnodes(pCxt, (SShowStmt*)pStmt); +/* case QUERY_NODE_SHOW_MODULES_STMT: return collectMetaKeyFromShowModules(pCxt, (SShowStmt*)pStmt); +*/ case QUERY_NODE_SHOW_QNODES_STMT: return collectMetaKeyFromShowQnodes(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_SNODES_STMT: diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c index 9b2ac662c8..6a26dcfa8b 100644 --- a/source/libs/parser/src/parAuthenticator.c +++ b/source/libs/parser/src/parAuthenticator.c @@ -263,7 +263,7 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) { return authAlterTable(pCxt, (SAlterTableStmt*)pStmt); case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: +// case QUERY_NODE_SHOW_MODULES_STMT: case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_SNODES_STMT: case QUERY_NODE_SHOW_BNODES_STMT: diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 38118c03f8..a41447edf3 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -92,6 +92,7 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { .numOfShowCols = 1, .pShowCols = {"*"} }, +/* { .showType = QUERY_NODE_SHOW_MODULES_STMT, 
.pDbName = TSDB_INFORMATION_SCHEMA_DB, @@ -99,6 +100,7 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { .numOfShowCols = 1, .pShowCols = {"*"} }, +*/ { .showType = QUERY_NODE_SHOW_QNODES_STMT, .pDbName = TSDB_INFORMATION_SCHEMA_DB, @@ -9164,7 +9166,7 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) { case QUERY_NODE_SHOW_USERS_STMT: case QUERY_NODE_SHOW_DNODES_STMT: case QUERY_NODE_SHOW_MNODES_STMT: - case QUERY_NODE_SHOW_MODULES_STMT: +// case QUERY_NODE_SHOW_MODULES_STMT: case QUERY_NODE_SHOW_QNODES_STMT: case QUERY_NODE_SHOW_FUNCTIONS_STMT: case QUERY_NODE_SHOW_INDEXES_STMT: From 84452c8deefc8ca600dcefdff907f6bc4608edcc Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 16:36:53 +0800 Subject: [PATCH 061/147] cos: use static sdk --- cmake/curl_CMakeLists.txt.in | 2 +- source/dnode/vnode/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 5f1efc1e5a..856d42257a 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 684134c2d6..a07e38e53b 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -162,7 +162,7 @@ target_link_libraries( PUBLIC index # s3 - cos_c_sdk + cos_c_sdk_static ${APR_UTIL_LIBRARY} ${APR_LIBRARY} ${MINIXML_LIBRARY} From 6530658815346945aaa333326ab72f54067182c4 Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 11 Aug 2023 17:05:59 +0800 Subject: 
[PATCH 062/147] fix: continue coding --- source/libs/executor/src/operator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index d0805a86e4..7f0c5baa36 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -373,7 +373,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { - int32_t code = createScanTableListInfo(pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, + int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, pTagIndexCond, pTaskInfo); if (code != TSDB_CODE_SUCCESS) { pTaskInfo->code = code; From e1971cf0a023e1f8e155a27e0fc1e92e2c56b415 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 11 Aug 2023 17:18:38 +0800 Subject: [PATCH 063/147] curl: disable brotli with static lib --- cmake/curl_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in index 856d42257a..0fe0c2256f 100644 --- a/cmake/curl_CMakeLists.txt.in +++ b/cmake/curl_CMakeLists.txt.in @@ -9,7 +9,7 @@ ExternalProject_Add(curl BUILD_IN_SOURCE TRUE BUILD_ALWAYS 1 #UPDATE_COMMAND "" - CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps + CONFIGURE_COMMAND ./configure --prefix=$ENV{HOME}/.cos-local.1 --without-ssl --enable-shared=no --disable-ldap --disable-ldaps --without-brotli #CONFIGURE_COMMAND ./configure --without-ssl BUILD_COMMAND make INSTALL_COMMAND make install From 47d2f9ad6df4385e0cb912841204e94cff1d4ed0 Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 11 Aug 2023 17:52:52 +0800 Subject: [PATCH 064/147] fix: first run without 
tag cond --- source/dnode/vnode/src/meta/metaQuery.c | 4 ++-- source/dnode/vnode/src/vnd/vnodeQuery.c | 6 +++--- source/libs/executor/src/operator.c | 1 + source/libs/executor/src/scanoperator.c | 15 +++++++++++---- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 31c7bc8500..39c3dfa080 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -427,7 +427,7 @@ SMCtbCursor *metaOpenCtbCursor(void* pVnode, tb_uid_t uid, int lock) { metaRLock(pMeta); } - ret = tdbTbcOpen(pMeta->pCtbIdx, &pCtbCur->pCur, NULL); + ret = tdbTbcOpen(pMeta->pCtbIdx, (TBC**)&pCtbCur->pCur, NULL); if (ret < 0) { metaULock(pMeta); taosMemoryFree(pCtbCur); @@ -1365,7 +1365,7 @@ int32_t metaGetTableTagsByUids(void *pVnode, int64_t suid, SArray *uidList) { } int32_t metaGetTableTags(void *pVnode, uint64_t suid, SArray *pUidTagInfo) { - SMCtbCursor *pCur = metaOpenCtbCursor(((SVnode *)pVnode)->pMeta, suid, 1); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, suid, 1); // If len > 0 means there already have uids, and we only want the // tags of the specified tables, of which uid in the uid list. 
Otherwise, all table tags are retrieved and kept diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index 51f4cee40c..48f8ec021d 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -440,7 +440,7 @@ int32_t vnodeGetTableList(void* pVnode, int8_t type, SArray* pList) { } int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list) { - SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, uid, 1); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, uid, 1); while (1) { tb_uid_t id = metaCtbCursorNext(pCur); @@ -462,7 +462,7 @@ int32_t vnodeGetCtbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bo int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list) { SVnode *pVnodeObj = pVnode; - SMCtbCursor *pCur = metaOpenCtbCursor(pVnodeObj->pMeta, suid, 1); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnodeObj, suid, 1); while (1) { tb_uid_t id = metaCtbCursorNext(pCur); @@ -521,7 +521,7 @@ int32_t vnodeGetStbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bo } int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) { - SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, suid, 0); + SMCtbCursor *pCur = metaOpenCtbCursor(pVnode, suid, 0); if (!pCur) { return TSDB_CODE_FAILED; } diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 7f0c5baa36..31998d13b6 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -371,6 +371,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; + pTagScanPhyNode->onlyMetaCtbIdx = true; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { int32_t code = 
createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 107ea14914..5f8bf03d80 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2922,7 +2922,8 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { SArray* aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); while (1) { - while (count < pOperator->resultInfo.capacity) { + int32_t numTables = 0; + while (numTables < pOperator->resultInfo.capacity) { SMCtbCursor* pCur = pInfo->pCtbCursor; tb_uid_t uid = pAPI->metaFn.ctbCursorNext(pInfo->pCtbCursor); if (uid == 0) { @@ -2932,14 +2933,19 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { info.pTagVal = taosMemoryMalloc(pCur->vLen); memcpy(info.pTagVal, pCur->pVal, pCur->vLen); taosArrayPush(aUidTags, &info); + ++numTables; } - int32_t numTables = taosArrayGetSize(aUidTags); if (numTables == 0) { break; } - - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + if (pInfo->pTagCond != NULL) { + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + } else { + for (int i = 0; i < numTables; ++i) { + taosArrayPush(aFilterIdxs, &i); + } + } tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, pAPI); count = taosArrayGetSize(aFilterIdxs); @@ -2955,6 +2961,7 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { taosArrayDestroy(aFilterIdxs); taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); + pRes->info.rows = count; pOperator->resultInfo.totalRows += count; return (pRes->info.rows == 0) ? 
NULL : pInfo->pRes; } From 847bc0da0881a625b06a1352ef80ed32a8e1927f Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 11 Aug 2023 18:10:16 +0800 Subject: [PATCH 065/147] fix: infinite tomb data sync loop --- source/dnode/vnode/src/tsdb/tsdbSnapshot.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index bdcf4a87c1..f547119f49 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -392,6 +392,9 @@ static int32_t tsdbSnapReadTombData(STsdbSnapReader* reader, uint8_t** data) { code = tTombBlockPut(reader->tombBlock, record); TSDB_CHECK_CODE(code, lino, _exit); + code = tsdbIterMergerNext(reader->tombIterMerger); + TSDB_CHECK_CODE(code, lino, _exit); + if (TOMB_BLOCK_SIZE(reader->tombBlock) >= 81920) { break; } From deda4b9eed07444133f140213e702c392824b111 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Fri, 11 Aug 2023 19:26:17 +0800 Subject: [PATCH 066/147] opti:wal logic --- include/libs/wal/wal.h | 7 +- source/dnode/vnode/src/inc/tq.h | 2 +- source/dnode/vnode/src/tq/tqRead.c | 44 ++++----- source/dnode/vnode/src/tq/tqUtil.c | 12 +-- source/libs/wal/src/walRead.c | 142 ++++------------------------- 5 files changed, 39 insertions(+), 168 deletions(-) diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index b19a0d783d..1f7323a06a 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -153,7 +153,6 @@ struct SWalReader { int64_t capacity; TdThreadMutex mutex; SWalFilterCond cond; - // TODO remove it SWalCkHead *pHead; }; @@ -208,9 +207,9 @@ void walReaderVerifyOffset(SWalReader *pWalReader, STqOffsetVal* pOffset) // only for tq usage void walSetReaderCapacity(SWalReader *pRead, int32_t capacity); -int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead); -int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead); -int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead 
*pHead); +int32_t walFetchHead(SWalReader *pRead, int64_t ver); +int32_t walFetchBody(SWalReader *pRead); +int32_t walSkipFetchBody(SWalReader *pRead); void walRefFirstVer(SWal *, SWalRef *); void walRefLastVer(SWal *, SWalRef *); diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index a6a84075b5..f08c308185 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -127,7 +127,7 @@ void tqDestroyTqHandle(void* data); // tqRead int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* offset); int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset); -int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum, uint64_t reqId); +int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t reqId); // tqExec int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SPackedData submit, STaosxRsp* pRsp, int32_t* totalRows); diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 9b8f1781cb..6c091fa4cb 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -184,50 +184,42 @@ end: return tbSuid == realTbSuid; } -int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** ppCkHead, uint64_t reqId) { - int32_t code = 0; +int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t reqId) { + int32_t code = -1; int32_t vgId = TD_VID(pTq->pVnode); - taosThreadMutexLock(&pHandle->pWalReader->mutex); int64_t offset = *fetchOffset; + int64_t lastVer = walGetLastVer(pHandle->pWalReader->pWal); + int64_t committedVer = walGetCommittedVer(pHandle->pWalReader->pWal); + int64_t appliedVer = walGetAppliedVer(pHandle->pWalReader->pWal); - while (1) { - if (walFetchHead(pHandle->pWalReader, offset, *ppCkHead) < 0) { + wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last 
index:%" PRId64 " commit index:%" PRId64 ", applied index:%" PRId64, + vgId, offset, lastVer, committedVer, appliedVer); + + while (offset <= appliedVer) { + if (walFetchHead(pHandle->pWalReader, offset) < 0) { tqDebug("tmq poll: consumer:0x%" PRIx64 ", (epoch %d) vgId:%d offset %" PRId64 ", no more log to return, reqId:0x%" PRIx64, pHandle->consumerId, pHandle->epoch, vgId, offset, reqId); - *fetchOffset = offset; - code = -1; goto END; } tqDebug("vgId:%d, consumer:0x%" PRIx64 " taosx get msg ver %" PRId64 ", type: %s, reqId:0x%" PRIx64, vgId, - pHandle->consumerId, offset, TMSG_INFO((*ppCkHead)->head.msgType), reqId); + pHandle->consumerId, offset, TMSG_INFO(pHandle->pWalReader->pHead->head.msgType), reqId); - if ((*ppCkHead)->head.msgType == TDMT_VND_SUBMIT) { - code = walFetchBody(pHandle->pWalReader, ppCkHead); - - if (code < 0) { - *fetchOffset = offset; - code = -1; - goto END; - } - *fetchOffset = offset; - code = 0; + if (pHandle->pWalReader->pHead->head.msgType == TDMT_VND_SUBMIT) { + code = walFetchBody(pHandle->pWalReader); goto END; } else { if (pHandle->fetchMeta != WITH_DATA) { - SWalCont* pHead = &((*ppCkHead)->head); + SWalCont* pHead = &(pHandle->pWalReader->pHead->head); if (IS_META_MSG(pHead->msgType) && !(pHead->msgType == TDMT_VND_DELETE && pHandle->fetchMeta == ONLY_META)) { - code = walFetchBody(pHandle->pWalReader, ppCkHead); + code = walFetchBody(pHandle->pWalReader); if (code < 0) { - *fetchOffset = offset; - code = -1; goto END; } if (isValValidForTable(pHandle, pHead)) { - *fetchOffset = offset; code = 0; goto END; } else { @@ -236,10 +228,8 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea } } } - code = walSkipFetchBody(pHandle->pWalReader, *ppCkHead); + code = walSkipFetchBody(pHandle->pWalReader); if (code < 0) { - *fetchOffset = offset; - code = -1; goto END; } offset++; @@ -247,7 +237,7 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea } END: - 
taosThreadMutexUnlock(&pHandle->pWalReader->mutex); + *fetchOffset = offset; return code; } diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index 5cbca6e0f2..42aac52c63 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -179,7 +179,6 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, SRpcMsg* pMsg, STqOffsetVal* offset) { int code = 0; int32_t vgId = TD_VID(pTq->pVnode); - SWalCkHead* pCkHead = NULL; SMqMetaRsp metaRsp = {0}; STaosxRsp taosxRsp = {0}; tqInitTaosxRsp(&taosxRsp, *offset); @@ -216,12 +215,6 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, if (offset->type == TMQ_OFFSET__LOG) { walReaderVerifyOffset(pHandle->pWalReader, offset); int64_t fetchVer = offset->version; - pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048); - if (pCkHead == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - code = -1; - goto end; - } walSetReaderCapacity(pHandle->pWalReader, 2048); int totalRows = 0; @@ -234,14 +227,14 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, break; } - if (tqFetchLog(pTq, pHandle, &fetchVer, &pCkHead, pRequest->reqId) < 0) { + if (tqFetchLog(pTq, pHandle, &fetchVer, pRequest->reqId) < 0) { tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer); // setRequestVersion(&taosxRsp.reqOffset, offset->version); code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__POLL_DATA_RSP, vgId); goto end; } - SWalCont* pHead = &pCkHead->head; + SWalCont* pHead = &pHandle->pWalReader->pHead->head; tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d) iter log, vgId:%d offset %" PRId64 " msgType %d", pRequest->consumerId, pRequest->epoch, vgId, fetchVer, pHead->msgType); @@ -291,7 +284,6 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, end: tDeleteSTaosxRsp(&taosxRsp); - taosMemoryFreeClear(pCkHead); return code; } diff --git 
a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 54b9576eb1..01404494e3 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -16,10 +16,6 @@ #include "taoserror.h" #include "walInt.h" -static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer); -static int32_t walFetchBodyNew(SWalReader *pRead); -static int32_t walSkipFetchBodyNew(SWalReader *pRead); - SWalReader *walOpenReader(SWal *pWal, SWalFilterCond *cond) { SWalReader *pReader = taosMemoryCalloc(1, sizeof(SWalReader)); if (pReader == NULL) { @@ -80,19 +76,19 @@ int32_t walNextValidMsg(SWalReader *pReader) { return -1; } while (fetchVer <= appliedVer) { - if (walFetchHeadNew(pReader, fetchVer) < 0) { + if (walFetchHead(pReader, fetchVer) < 0) { return -1; } int32_t type = pReader->pHead->head.msgType; if (type == TDMT_VND_SUBMIT || ((type == TDMT_VND_DELETE) && (pReader->cond.deleteMsg == 1)) || (IS_META_MSG(type) && pReader->cond.scanMeta)) { - if (walFetchBodyNew(pReader) < 0) { + if (walFetchBody(pReader) < 0) { return -1; } return 0; } else { - if (walSkipFetchBodyNew(pReader) < 0) { + if (walSkipFetchBody(pReader) < 0) { return -1; } @@ -256,102 +252,7 @@ int32_t walReaderSeekVer(SWalReader *pReader, int64_t ver) { void walSetReaderCapacity(SWalReader *pRead, int32_t capacity) { pRead->capacity = capacity; } -static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer) { - int64_t contLen; - bool seeked = false; - - wDebug("vgId:%d, wal starts to fetch head, index:%" PRId64, pRead->pWal->cfg.vgId, fetchVer); - - if (pRead->curVersion != fetchVer) { - if (walReaderSeekVer(pRead, fetchVer) < 0) { - return -1; - } - seeked = true; - } - - while (1) { - contLen = taosReadFile(pRead->pLogFile, pRead->pHead, sizeof(SWalCkHead)); - if (contLen == sizeof(SWalCkHead)) { - break; - } else if (contLen == 0 && !seeked) { - if(walReadSeekVerImpl(pRead, fetchVer) < 0){ - return -1; - } - seeked = true; - continue; - } else { - if (contLen < 0) { - terrno 
= TAOS_SYSTEM_ERROR(errno); - } else { - terrno = TSDB_CODE_WAL_FILE_CORRUPTED; - } - return -1; - } - } -// pRead->curInvalid = 0; - return 0; -} - -static int32_t walFetchBodyNew(SWalReader *pReader) { - SWalCont *pReadHead = &pReader->pHead->head; - int64_t ver = pReadHead->version; - - wDebug("vgId:%d, wal starts to fetch body, ver:%" PRId64 " ,len:%d, total", pReader->pWal->cfg.vgId, ver, - pReadHead->bodyLen); - - if (pReader->capacity < pReadHead->bodyLen) { - SWalCkHead *ptr = (SWalCkHead *)taosMemoryRealloc(pReader->pHead, sizeof(SWalCkHead) + pReadHead->bodyLen); - if (ptr == NULL) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } - - pReader->pHead = ptr; - pReadHead = &pReader->pHead->head; - pReader->capacity = pReadHead->bodyLen; - } - - if (pReadHead->bodyLen != taosReadFile(pReader->pLogFile, pReadHead->body, pReadHead->bodyLen)) { - if (pReadHead->bodyLen < 0) { - terrno = TAOS_SYSTEM_ERROR(errno); - wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since %s", - pReader->pWal->cfg.vgId, pReader->pHead->head.version, ver, tstrerror(terrno)); - } else { - wError("vgId:%d, wal fetch body error:%" PRId64 ", read request index:%" PRId64 ", since file corrupted", - pReader->pWal->cfg.vgId, pReader->pHead->head.version, ver); - terrno = TSDB_CODE_WAL_FILE_CORRUPTED; - } - return -1; - } - - if (walValidBodyCksum(pReader->pHead) != 0) { - wError("vgId:%d, wal fetch body error:%" PRId64 ", since body checksum not passed", pReader->pWal->cfg.vgId, ver); - terrno = TSDB_CODE_WAL_FILE_CORRUPTED; - return -1; - } - - wDebug("vgId:%d, index:%" PRId64 " is fetched, type:%d, cursor advance", pReader->pWal->cfg.vgId, ver, pReader->pHead->head.msgType); - pReader->curVersion = ver + 1; - return 0; -} - -static int32_t walSkipFetchBodyNew(SWalReader *pRead) { - int64_t code; - - code = taosLSeekFile(pRead->pLogFile, pRead->pHead->head.bodyLen, SEEK_CUR); - if (code < 0) { - terrno = TAOS_SYSTEM_ERROR(errno); -// pRead->curInvalid 
= 1; - return -1; - } - - pRead->curVersion++; - wDebug("vgId:%d, version advance to %" PRId64 ", skip fetch", pRead->pWal->cfg.vgId, pRead->curVersion); - - return 0; -} - -int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { +int32_t walFetchHead(SWalReader *pRead, int64_t ver) { int64_t code; int64_t contLen; bool seeked = false; @@ -369,15 +270,13 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { if (pRead->curVersion != ver) { code = walReaderSeekVer(pRead, ver); if (code < 0) { -// pRead->curVersion = ver; -// pRead->curInvalid = 1; return -1; } seeked = true; } while (1) { - contLen = taosReadFile(pRead->pLogFile, pHead, sizeof(SWalCkHead)); + contLen = taosReadFile(pRead->pLogFile, pRead->pHead, sizeof(SWalCkHead)); if (contLen == sizeof(SWalCkHead)) { break; } else if (contLen == 0 && !seeked) { @@ -392,12 +291,11 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { } else { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } -// pRead->curInvalid = 1; return -1; } } - code = walValidHeadCksum(pHead); + code = walValidHeadCksum(pRead->pHead); if (code != 0) { wError("vgId:%d, unexpected wal log index:%" PRId64 ", since head checksum not passed", pRead->pWal->cfg.vgId, ver); @@ -405,32 +303,27 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) { return -1; } -// pRead->curInvalid = 0; return 0; } -int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead) { - int64_t code; - +int32_t walSkipFetchBody(SWalReader *pRead) { wDebug("vgId:%d, skip fetch body %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64 ", applied ver:%" PRId64, - pRead->pWal->cfg.vgId, pHead->head.version, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, + pRead->pWal->cfg.vgId, pRead->pHead->head.version, pRead->pWal->vers.firstVer, pRead->pWal->vers.commitVer, pRead->pWal->vers.lastVer, pRead->pWal->vers.appliedVer); - code = taosLSeekFile(pRead->pLogFile, 
pHead->head.bodyLen, SEEK_CUR); + int64_t code = taosLSeekFile(pRead->pLogFile, pRead->pHead->head.bodyLen, SEEK_CUR); if (code < 0) { terrno = TAOS_SYSTEM_ERROR(errno); -// pRead->curInvalid = 1; return -1; } pRead->curVersion++; - return 0; } -int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { - SWalCont *pReadHead = &((*ppHead)->head); +int32_t walFetchBody(SWalReader *pRead) { + SWalCont *pReadHead = &pRead->pHead->head; int64_t ver = pReadHead->version; wDebug("vgId:%d, fetch body %" PRId64 ", first ver:%" PRId64 ", commit ver:%" PRId64 ", last ver:%" PRId64 @@ -439,13 +332,13 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { pRead->pWal->vers.appliedVer); if (pRead->capacity < pReadHead->bodyLen) { - SWalCkHead *ptr = (SWalCkHead *)taosMemoryRealloc(*ppHead, sizeof(SWalCkHead) + pReadHead->bodyLen); + SWalCkHead *ptr = (SWalCkHead *)taosMemoryRealloc(pRead->pHead, sizeof(SWalCkHead) + pReadHead->bodyLen); if (ptr == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } - *ppHead = ptr; - pReadHead = &((*ppHead)->head); + pRead->pHead = ptr; + pReadHead = &pRead->pHead->head; pRead->capacity = pReadHead->bodyLen; } @@ -459,27 +352,24 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) { pRead->pWal->cfg.vgId, pReadHead->version, ver); terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } -// pRead->curInvalid = 1; return -1; } if (pReadHead->version != ver) { wError("vgId:%d, wal fetch body error, index:%" PRId64 ", read request index:%" PRId64, pRead->pWal->cfg.vgId, pReadHead->version, ver); -// pRead->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; return -1; } - if (walValidBodyCksum(*ppHead) != 0) { + if (walValidBodyCksum(pRead->pHead) != 0) { wError("vgId:%d, wal fetch body error, index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId, ver); -// pRead->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; return -1; } - pRead->curVersion = ver + 1; + pRead->curVersion++; return 0; } From 
dea30255ecc6be4119166373a30ff409cf0b9182 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 11 Aug 2023 23:51:52 +0800 Subject: [PATCH 067/147] fix(stream): add new datablock type. --- include/common/tcommon.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/common/tcommon.h b/include/common/tcommon.h index bdfb1d32b4..705f5b675b 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -169,6 +169,7 @@ typedef enum EStreamType { STREAM_PULL_OVER, STREAM_FILL_OVER, STREAM_CREATE_CHILD_TABLE, + STREAM_TRANS_STATE, } EStreamType; #pragma pack(push, 1) From a89ce1a20b2da84a42257c01e7be000bc3ad47ed Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 12 Aug 2023 01:24:07 +0800 Subject: [PATCH 068/147] fix(stream): transfer state by using data block. --- include/common/tcommon.h | 1 + include/libs/stream/tstream.h | 1 + source/libs/stream/src/streamExec.c | 60 ++++++++++++++++++++++---- source/libs/stream/src/streamRecover.c | 29 +++++++++++++ 4 files changed, 82 insertions(+), 9 deletions(-) diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 705f5b675b..1dfe30af71 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -152,6 +152,7 @@ enum { STREAM_INPUT__DATA_RETRIEVE, STREAM_INPUT__GET_RES, STREAM_INPUT__CHECKPOINT, + STREAM_INPUT__TRANS_STATE, STREAM_INPUT__REF_DATA_BLOCK, STREAM_INPUT__DESTROY, }; diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index b9b24917f3..b7e323a213 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -627,6 +627,7 @@ int32_t streamSourceScanHistoryData(SStreamTask* pTask); int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask); int32_t streamDispatchTransferStateMsg(SStreamTask* pTask); +int32_t appendTranstateIntoInputQ(SStreamTask* pTask); // agg level int32_t streamTaskScanHistoryPrepare(SStreamTask* pTask); diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 
b479931cd2..102c8805b5 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -390,6 +390,11 @@ static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) { if (level == TASK_LEVEL__SOURCE) { streamTaskFillHistoryFinished(pTask); streamTaskEndScanWAL(pTask); + + code = streamDoTransferStateToStreamTask(pTask); + if (code != TSDB_CODE_SUCCESS) { // todo handle this + return code; + } } else if (level == TASK_LEVEL__AGG) { // do transfer task operator states. code = streamDoTransferStateToStreamTask(pTask); if (code != TSDB_CODE_SUCCESS) { // todo handle this @@ -460,6 +465,40 @@ static int32_t extractMsgFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInpu } } +int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock) { + const char* id = pTask->id.idStr; + int32_t code = TSDB_CODE_SUCCESS; + + int32_t level = pTask->info.taskLevel; + if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SINK) { + int32_t remain = streamAlignTransferState(pTask); + if (remain > 0) { + qDebug("s-task:%s receive upstream transfer state msg, remain:%d", id, remain); + return 0; + } + + // transfer the ownership of executor state + qDebug("s-task:%s all upstream tasks send transfer msg, open transfer state flag", id); + ASSERT(pTask->streamTaskId.taskId != 0 && pTask->info.fillHistory == 1); + + pTask->status.transferState = true; + } + + // dispatch the transtate block to downstream task immediately + if (level == TASK_LEVEL__SOURCE || level == TASK_LEVEL__AGG) { + // pBlock-> = pTask->id.taskId; + pBlock->srcVgId = pTask->pMeta->vgId; + code = taosWriteQitem(pTask->outputInfo.queue->queue, pBlock); + if (code == 0) { + streamDispatchStreamBlock(pTask); + } else { + streamFreeQitem((SStreamQueueItem*)pBlock); + } + } + + return code; +} + /** * todo: the batch of blocks should be tuned dynamic, according to the total elapsed time of each batch of blocks, the * appropriate batch of blocks should be handled in 5 to 
10 sec. @@ -484,6 +523,11 @@ int32_t streamExecForAll(SStreamTask* pTask) { break; } + if (pInput->type == STREAM_INPUT__TRANS_STATE) { + streamProcessTranstateBlock(pTask, (SStreamDataBlock*)pInput); + return 0; + } + if (pTask->info.taskLevel == TASK_LEVEL__SINK) { ASSERT(pInput->type == STREAM_INPUT__DATA_BLOCK); qDebug("s-task:%s sink task start to sink %d blocks", id, batchSize); @@ -557,17 +601,15 @@ int32_t streamTaskEndScanWAL(SStreamTask* pTask) { qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, el); // 1. notify all downstream tasks to transfer executor state after handle all history blocks. - int32_t code = streamDispatchTransferStateMsg(pTask); - if (code != TSDB_CODE_SUCCESS) { - // todo handle error - } +// pTask->status.transferState = true; + appendTranstateIntoInputQ(pTask); // 2. do transfer stream task operator states. - pTask->status.transferState = true; - code = streamDoTransferStateToStreamTask(pTask); - if (code != TSDB_CODE_SUCCESS) { // todo handle error - return code; - } + // todo remove this +// int32_t code = streamDoTransferStateToStreamTask(pTask); +// if (code != TSDB_CODE_SUCCESS) { // todo handle error +// return code; +// } return TSDB_CODE_SUCCESS; } diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index e59b3f682d..b46ded6ca7 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -415,6 +415,35 @@ static int32_t doDispatchTransferMsg(SStreamTask* pTask, const SStreamTransferRe return 0; } +int32_t appendTranstateIntoInputQ(SStreamTask* pTask) { + SStreamDataBlock* pTranstate = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, sizeof(SSDataBlock)); + if (pTranstate == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock)); + if (pBlock == NULL) { + taosFreeQitem(pTranstate); + return TSDB_CODE_OUT_OF_MEMORY; + } + + pBlock->info.type = 
STREAM_TRANS_STATE; + pBlock->info.rows = 1; + pBlock->info.childId = pTask->info.selfChildId; + + pTranstate->blocks = taosArrayInit(4, sizeof(SSDataBlock));//pBlock; + taosArrayPush(pTranstate->blocks, pBlock); + + taosMemoryFree(pBlock); + if (tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pTranstate) < 0) { + taosFreeQitem(pTranstate); + return TSDB_CODE_OUT_OF_MEMORY; + } + + streamSchedExec(pTask); + return TSDB_CODE_SUCCESS; +} + int32_t streamDispatchTransferStateMsg(SStreamTask* pTask) { SStreamTransferReq req = { .streamId = pTask->id.streamId, .childId = pTask->info.selfChildId }; From edd2fa4f351c3c3434c2143822b0a188a1c305d1 Mon Sep 17 00:00:00 2001 From: slzhou Date: Sat, 12 Aug 2023 08:17:43 +0800 Subject: [PATCH 069/147] fix: pass compilation and simple test --- source/libs/executor/src/scanoperator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 5f8bf03d80..ac20bae167 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2940,7 +2940,7 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { break; } if (pInfo->pTagCond != NULL) { - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, pInfo->readHandle.vnode, aFilterIdxs, pAPI); + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI); } else { for (int i = 0; i < numTables; ++i) { taosArrayPush(aFilterIdxs, &i); From f83bfec067deb4de1ab9e98434094f0d0e20a8cf Mon Sep 17 00:00:00 2001 From: slzhou Date: Sat, 12 Aug 2023 08:28:25 +0800 Subject: [PATCH 070/147] fix: change only meta ctb idx back to false --- source/libs/executor/src/operator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 31998d13b6..abef8298e5 100644 --- a/source/libs/executor/src/operator.c +++ 
b/source/libs/executor/src/operator.c @@ -371,7 +371,7 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; - pTagScanPhyNode->onlyMetaCtbIdx = true; + pTagScanPhyNode->onlyMetaCtbIdx = false; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, From ce721a0146f65e37d5708564a1b8d1a89c293bdf Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 12 Aug 2023 16:32:05 +0800 Subject: [PATCH 071/147] fix(stream): fix error. --- include/libs/stream/tstream.h | 1 + source/dnode/vnode/src/tq/tqRestore.c | 8 +++++--- source/libs/stream/src/stream.c | 3 ++- source/libs/stream/src/streamExec.c | 23 ++++++++++++----------- source/libs/stream/src/streamRecover.c | 5 +++++ 5 files changed, 25 insertions(+), 15 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index b7e323a213..f40a6c9338 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -272,6 +272,7 @@ typedef struct SStreamStatus { int8_t schedStatus; int8_t keepTaskStatus; bool transferState; + bool appendTranstateBlock; // has append the transfer state data block already int8_t timerActive; // timer is active int8_t pauseAllowed; // allowed task status to be set to be paused } SStreamStatus; diff --git a/source/dnode/vnode/src/tq/tqRestore.c b/source/dnode/vnode/src/tq/tqRestore.c index 3d9a91899c..a217bc2966 100644 --- a/source/dnode/vnode/src/tq/tqRestore.c +++ b/source/dnode/vnode/src/tq/tqRestore.c @@ -214,9 +214,11 @@ static void checkForFillHistoryVerRange(SStreamTask* pTask, int64_t ver) { qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach 
the maximum ver:%" PRId64 ", not scan wal anymore, set the transfer state flag", pTask->id.idStr, ver, pTask->dataRange.range.maxVer); - pTask->status.transferState = true; - - /*int32_t code = */streamSchedExec(pTask); + if (!pTask->status.appendTranstateBlock) { + pTask->status.appendTranstateBlock = true; + appendTranstateIntoInputQ(pTask); + /*int32_t code = */streamSchedExec(pTask); + } } } diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index f85ade591c..fa24c01418 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -385,8 +385,9 @@ int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem) { destroyStreamDataBlock((SStreamDataBlock*) pItem); return code; } - } else if (type == STREAM_INPUT__CHECKPOINT) { + } else if (type == STREAM_INPUT__CHECKPOINT || type == STREAM_INPUT__TRANS_STATE) { taosWriteQitem(pTask->inputQueue->queue, pItem); + qDebug("s-task:%s trans-state blockdata enqueue, total in queue:%d, size:%.2fMiB", pTask->id.idStr, total, size); } else if (type == STREAM_INPUT__GET_RES) { // use the default memory limit, refactor later. taosWriteQitem(pTask->inputQueue->queue, pItem); diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 102c8805b5..c73868123c 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -391,11 +391,11 @@ static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) { streamTaskFillHistoryFinished(pTask); streamTaskEndScanWAL(pTask); - code = streamDoTransferStateToStreamTask(pTask); - if (code != TSDB_CODE_SUCCESS) { // todo handle this - return code; - } - } else if (level == TASK_LEVEL__AGG) { // do transfer task operator states. + code = streamDoTransferStateToStreamTask(pTask); + if (code != TSDB_CODE_SUCCESS) { // todo handle this + return code; + } + } else if (level == TASK_LEVEL__AGG) { // do transfer task operator states. 
code = streamDoTransferStateToStreamTask(pTask); if (code != TSDB_CODE_SUCCESS) { // todo handle this return code; @@ -484,9 +484,10 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock pTask->status.transferState = true; } - // dispatch the transtate block to downstream task immediately - if (level == TASK_LEVEL__SOURCE || level == TASK_LEVEL__AGG) { - // pBlock-> = pTask->id.taskId; + // dispatch the tran-state block to downstream task immediately + int32_t type = pTask->outputInfo.type; + if ((level == TASK_LEVEL__AGG || level == TASK_LEVEL__SOURCE) && + (type == TASK_OUTPUT__FIXED_DISPATCH || type == TASK_OUTPUT__SHUFFLE_DISPATCH)) { pBlock->srcVgId = pTask->pMeta->vgId; code = taosWriteQitem(pTask->outputInfo.queue->queue, pBlock); if (code == 0) { @@ -640,10 +641,10 @@ int32_t streamTryExec(SStreamTask* pTask) { // the schedStatus == TASK_SCHED_STATUS__ACTIVE, streamSchedExec cannot be executed, so execute once again by // call this function (streamExecForAll) directly. 
- code = streamExecForAll(pTask); - if (code < 0) { +// code = streamExecForAll(pTask); +// if (code < 0) { // do nothing - } +// } } atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index b46ded6ca7..708524bf10 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -427,6 +427,8 @@ int32_t appendTranstateIntoInputQ(SStreamTask* pTask) { return TSDB_CODE_OUT_OF_MEMORY; } + pTranstate->type = STREAM_INPUT__TRANS_STATE; + pBlock->info.type = STREAM_TRANS_STATE; pBlock->info.rows = 1; pBlock->info.childId = pTask->info.selfChildId; @@ -440,7 +442,10 @@ int32_t appendTranstateIntoInputQ(SStreamTask* pTask) { return TSDB_CODE_OUT_OF_MEMORY; } + qDebug("s-task:%s set sched-status:%d, prev:%d", pTask->id.idStr, TASK_SCHED_STATUS__INACTIVE, pTask->status.schedStatus); + pTask->status.schedStatus = TASK_SCHED_STATUS__INACTIVE; streamSchedExec(pTask); + return TSDB_CODE_SUCCESS; } From 0d0e30b7646afe4761dc25f932c726652e13c609 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 12 Aug 2023 17:20:10 +0800 Subject: [PATCH 072/147] fix(stream): fix the error. 
--- include/common/tcommon.h | 1 + source/libs/stream/src/streamExec.c | 119 ++++++++++++++++++---------- 2 files changed, 79 insertions(+), 41 deletions(-) diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 1dfe30af71..92e64f075f 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -152,6 +152,7 @@ enum { STREAM_INPUT__DATA_RETRIEVE, STREAM_INPUT__GET_RES, STREAM_INPUT__CHECKPOINT, + STREAM_INPUT__CHECKPOINT_TRIGGER, STREAM_INPUT__TRANS_STATE, STREAM_INPUT__REF_DATA_BLOCK, STREAM_INPUT__DESTROY, diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index c73868123c..bbfaa8cb9d 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -334,6 +334,9 @@ static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { qDebug("s-task:%s no need to update time window for non-source task", pStreamTask->id.idStr); } + // todo check the output queue for fill-history task, and wait for it complete + + // 1. 
expand the query time window for stream task of WAL scanner pTimeWindow->skey = INT64_MIN; qStreamInfoResetTimewindowFilter(pStreamTask->exec.pExecutor); @@ -389,8 +392,6 @@ static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) { int32_t level = pTask->info.taskLevel; if (level == TASK_LEVEL__SOURCE) { streamTaskFillHistoryFinished(pTask); - streamTaskEndScanWAL(pTask); - code = streamDoTransferStateToStreamTask(pTask); if (code != TSDB_CODE_SUCCESS) { // todo handle this return code; @@ -405,14 +406,41 @@ static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) { return code; } -static int32_t extractMsgFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks, - const char* id) { - int32_t retryTimes = 0; - int32_t MAX_RETRY_TIMES = 5; +static int32_t extractBlocksFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks) { + int32_t retryTimes = 0; + int32_t MAX_RETRY_TIMES = 5; + const char* id = pTask->id.idStr; + if (pTask->info.taskLevel == TASK_LEVEL__SINK) { // extract block from inputQ, one-by-one + while (1) { + if (streamTaskShouldPause(&pTask->status) || streamTaskShouldStop(&pTask->status)) { + qDebug("s-task:%s task should pause, extract input blocks:%d", pTask->id.idStr, *numOfBlocks); + return TSDB_CODE_SUCCESS; + } + + SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputQueue); + if (qItem == NULL) { + if (pTask->info.taskLevel == TASK_LEVEL__SOURCE && (++retryTimes) < MAX_RETRY_TIMES) { + taosMsleep(10); + qDebug("===stream===try again batchSize:%d, retry:%d, %s", *numOfBlocks, retryTimes, id); + continue; + } + + qDebug("===stream===break batchSize:%d, %s", *numOfBlocks, id); + return TSDB_CODE_SUCCESS; + } + + qDebug("s-task:%s sink task handle result block one-by-one", id); + *numOfBlocks = 1; + *pInput = qItem; + return TSDB_CODE_SUCCESS; + } + } + + // non sink task while (1) { - if (streamTaskShouldPause(&pTask->status)) { - qDebug("s-task:%s task should pause, input 
blocks:%d", pTask->id.idStr, *numOfBlocks); + if (streamTaskShouldPause(&pTask->status) || streamTaskShouldStop(&pTask->status)) { + qDebug("s-task:%s task should pause, extract input blocks:%d", pTask->id.idStr, *numOfBlocks); return TSDB_CODE_SUCCESS; } @@ -420,47 +448,52 @@ static int32_t extractMsgFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInpu if (qItem == NULL) { if (pTask->info.taskLevel == TASK_LEVEL__SOURCE && (++retryTimes) < MAX_RETRY_TIMES) { taosMsleep(10); - qDebug("===stream===try again batchSize:%d, retry:%d", *numOfBlocks, retryTimes); + qDebug("===stream===try again batchSize:%d, retry:%d, %s", *numOfBlocks, retryTimes, id); continue; } - qDebug("===stream===break batchSize:%d", *numOfBlocks); + qDebug("===stream===break batchSize:%d, %s", *numOfBlocks, id); return TSDB_CODE_SUCCESS; } - // do not merge blocks for sink node - if (pTask->info.taskLevel == TASK_LEVEL__SINK) { - *numOfBlocks = 1; - *pInput = qItem; - return TSDB_CODE_SUCCESS; - } - - if (*pInput == NULL) { - ASSERT((*numOfBlocks) == 0); - *pInput = qItem; - } else { - // todo we need to sort the data block, instead of just appending into the array list. 
- void* newRet = streamMergeQueueItem(*pInput, qItem); - if (newRet == NULL) { - if (terrno == 0) { - qDebug("s-task:%s failed to merge blocks from inputQ, numOfBlocks:%d", id, *numOfBlocks); - } else { - qDebug("s-task:%s failed to merge blocks from inputQ, numOfBlocks:%d, code:%s", id, *numOfBlocks, - tstrerror(terrno)); - } + // do not merge blocks for sink node and check point data block + if (qItem->type == STREAM_INPUT__CHECKPOINT || qItem->type == STREAM_INPUT__CHECKPOINT_TRIGGER || + qItem->type == STREAM_INPUT__TRANS_STATE) { + if (*pInput == NULL) { + qDebug("s-task:%s checkpoint/transtate msg extracted, start to process immediately", id); + *numOfBlocks = 1; + *pInput = qItem; + return TSDB_CODE_SUCCESS; + } else { + // previous existed blocks needs to be handle, before handle the checkpoint msg block + qDebug("s-task:%s checkpoint/transtate msg extracted, handle previous block first, numOfBlocks:%d", id, + *numOfBlocks); streamQueueProcessFail(pTask->inputQueue); return TSDB_CODE_SUCCESS; } + } else { + if (*pInput == NULL) { + ASSERT((*numOfBlocks) == 0); + *pInput = qItem; + } else { + // todo we need to sort the data block, instead of just appending into the array list. 
+ void* newRet = streamMergeQueueItem(*pInput, qItem); + if (newRet == NULL) { + qError("s-task:%s failed to merge blocks from inputQ, numOfBlocks:%d", id, *numOfBlocks); + streamQueueProcessFail(pTask->inputQueue); + return TSDB_CODE_SUCCESS; + } - *pInput = newRet; - } + *pInput = newRet; + } - *numOfBlocks += 1; - streamQueueProcessSuccess(pTask->inputQueue); + *numOfBlocks += 1; + streamQueueProcessSuccess(pTask->inputQueue); - if (*numOfBlocks >= MAX_STREAM_EXEC_BATCH_NUM) { - qDebug("s-task:%s batch size limit:%d reached, start to process blocks", id, MAX_STREAM_EXEC_BATCH_NUM); - return TSDB_CODE_SUCCESS; + if (*numOfBlocks >= MAX_STREAM_EXEC_BATCH_NUM) { + qDebug("s-task:%s batch size limit:%d reached, start to process blocks", id, MAX_STREAM_EXEC_BATCH_NUM); + return TSDB_CODE_SUCCESS; + } } } } @@ -476,14 +509,18 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock qDebug("s-task:%s receive upstream transfer state msg, remain:%d", id, remain); return 0; } + } - // transfer the ownership of executor state + // transfer the ownership of executor state + if (level == TASK_LEVEL__SOURCE) { + qDebug("s-task:%s open transfer state flag for source task", id); + } else { qDebug("s-task:%s all upstream tasks send transfer msg, open transfer state flag", id); ASSERT(pTask->streamTaskId.taskId != 0 && pTask->info.fillHistory == 1); - - pTask->status.transferState = true; } + pTask->status.transferState = true; + // dispatch the tran-state block to downstream task immediately int32_t type = pTask->outputInfo.type; if ((level == TASK_LEVEL__AGG || level == TASK_LEVEL__SOURCE) && @@ -518,7 +555,7 @@ int32_t streamExecForAll(SStreamTask* pTask) { // merge multiple input data if possible in the input queue. 
qDebug("s-task:%s start to extract data block from inputQ", id); - /*int32_t code = */extractMsgFromInputQ(pTask, &pInput, &batchSize, id); + /*int32_t code = */extractBlocksFromInputQ(pTask, &pInput, &batchSize); if (pInput == NULL) { ASSERT(batchSize == 0); break; From 30c2a9c61927c66a61673d2f9996455ebae3c0ac Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 12 Aug 2023 18:27:51 +0800 Subject: [PATCH 073/147] refactor: do some internal refactor. --- source/libs/stream/inc/streamInt.h | 2 +- source/libs/stream/src/stream.c | 2 +- source/libs/stream/src/streamDispatch.c | 9 +++++---- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h index 32d6dc65d9..30c941d106 100644 --- a/source/libs/stream/inc/streamInt.h +++ b/source/libs/stream/inc/streamInt.h @@ -52,7 +52,7 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock) int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq); -int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData); +int32_t doDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData); int32_t streamDispatchCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pReq, int32_t nodeId, SEpSet* pEpSet); int32_t streamDoDispatchScanHistoryFinishMsg(SStreamTask* pTask, const SStreamScanHistoryFinishReq* pReq, int32_t vgId, diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index fa24c01418..39f4e7fc7a 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -268,7 +268,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i } else { qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, retry cnt:%d", pTask->id.idStr, pRsp->downstreamTaskId, tstrerror(code), ++pTask->msgInfo.retryCount); - return streamDispatchAllBlocks(pTask, pTask->msgInfo.pData); + return 
doDispatchAllBlocks(pTask, pTask->msgInfo.pData); } } diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 8334ea1c88..7eef42e289 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -436,7 +436,7 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S return 0; } -int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) { +int32_t doDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) { int32_t code = 0; int32_t numOfBlocks = taosArrayGetSize(pData->blocks); @@ -552,7 +552,7 @@ static void doRetryDispatchData(void* param, void* tmrId) { SStreamTask* pTask = param; ASSERT(pTask->outputInfo.status == TASK_OUTPUT_STATUS__WAIT); - int32_t code = streamDispatchAllBlocks(pTask, pTask->msgInfo.pData); + int32_t code = doDispatchAllBlocks(pTask, pTask->msgInfo.pData); if (code != TSDB_CODE_SUCCESS) { qDebug("s-task:%s reset the waitRspCnt to be 0 before launch retry dispatch", pTask->id.idStr); atomic_store_32(&pTask->shuffleDispatcher.waitingRspCnt, 0); @@ -593,12 +593,13 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) { } pTask->msgInfo.pData = pBlock; - ASSERT(pBlock->type == STREAM_INPUT__DATA_BLOCK); + ASSERT(pBlock->type == STREAM_INPUT__DATA_BLOCK || pBlock->type == STREAM_INPUT__CHECKPOINT_TRIGGER || + pBlock->type == STREAM_INPUT__TRANS_STATE); int32_t retryCount = 0; while (1) { - int32_t code = streamDispatchAllBlocks(pTask, pBlock); + int32_t code = doDispatchAllBlocks(pTask, pBlock); if (code == TSDB_CODE_SUCCESS) { break; } From 8f9de93cf005197196be53d7f6b3820c721b5d47 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 12 Aug 2023 18:37:36 +0800 Subject: [PATCH 074/147] refactor: do some internal refactor. 
--- include/libs/stream/tstream.h | 2 ++ source/libs/stream/src/stream.c | 56 ++++++++++++++++++++++++++++----- 2 files changed, 50 insertions(+), 8 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index f40a6c9338..8dc0684976 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -251,6 +251,7 @@ typedef struct SStreamChildEpInfo { int32_t nodeId; int32_t childId; int32_t taskId; + int8_t dataAllowed; SEpSet epSet; } SStreamChildEpInfo; @@ -400,6 +401,7 @@ typedef struct { typedef struct { int64_t streamId; + int32_t type; int32_t taskId; int32_t dataSrcVgId; int32_t upstreamTaskId; diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 39f4e7fc7a..79759ff012 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -239,21 +239,61 @@ int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, S qDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64, pTask->id.idStr, pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->totalLen); - // todo add the input queue buffer limitation - streamTaskEnqueueBlocks(pTask, pReq, pRsp); - tDeleteStreamDispatchReq(pReq); + int32_t status = 0; - if (exec) { - if (streamTryExec(pTask) < 0) { - return -1; - } + SStreamChildEpInfo* pInfo = streamTaskGetUpstreamTaskEpInfo(pTask, pReq->upstreamTaskId); + ASSERT(pInfo != NULL); + + if (!pInfo->dataAllowed) { + qWarn("s-task:%s data from task:0x%x is denied, since inputQ is closed for it", pTask->id.idStr, pReq->upstreamTaskId); + status = TASK_INPUT_STATUS__BLOCKED; } else { - streamSchedExec(pTask); + // Current task has received the checkpoint req from the upstream task, from which the message should all be blocked + if (pReq->type == STREAM_INPUT__CHECKPOINT_TRIGGER) { + streamTaskCloseUpstreamInput(pTask, pReq->upstreamTaskId); + qDebug("s-task:%s close inputQ for upstream:0x%x", pTask->id.idStr, 
pReq->upstreamTaskId); + } + + status = streamTaskAppendInputBlocks(pTask, pReq); } + { + // do send response with the input status + int32_t code = buildDispatchRsp(pTask, pReq, status, &pRsp->pCont); + if (code != TSDB_CODE_SUCCESS) { + // todo handle failure + return code; + } + + pRsp->contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp); + tmsgSendRsp(pRsp); + } + + tDeleteStreamDispatchReq(pReq); + streamSchedExec(pTask); + return 0; } +//int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp, bool exec) { +// qDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64, pTask->id.idStr, +// pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->totalLen); +// +// // todo add the input queue buffer limitation +// streamTaskEnqueueBlocks(pTask, pReq, pRsp); +// tDeleteStreamDispatchReq(pReq); +// +// if (exec) { +// if (streamTryExec(pTask) < 0) { +// return -1; +// } +// } else { +// streamSchedExec(pTask); +// } +// +// return 0; +//} + // todo record the idle time for dispatch data int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code) { if (code != TSDB_CODE_SUCCESS) { From a0534ee715acd723063093903866a61f1a4aea03 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 12 Aug 2023 18:40:09 +0800 Subject: [PATCH 075/147] refactor: do some internal refactor. 
--- source/libs/stream/src/stream.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 79759ff012..9548c3c327 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -235,6 +235,24 @@ int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock return 0; } +static int32_t streamTaskAppendInputBlocks(SStreamTask* pTask, const SStreamDispatchReq* pReq) { + int8_t status = 0; + + SStreamDataBlock* pBlock = createStreamBlockFromDispatchMsg(pReq, pReq->type, pReq->srcVgId); + if (pBlock == NULL) { + streamTaskInputFail(pTask); + status = TASK_INPUT_STATUS__FAILED; + qError("vgId:%d, s-task:%s failed to receive dispatch msg, reason: out of memory", pTask->pMeta->vgId, + pTask->id.idStr); + } else { + int32_t code = tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pBlock); + // input queue is full, upstream is blocked now + status = (code == TSDB_CODE_SUCCESS) ? TASK_INPUT_STATUS__NORMAL : TASK_INPUT_STATUS__BLOCKED; + } + + return status; +} + int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp, bool exec) { qDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64, pTask->id.idStr, pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->totalLen); From 075b5e94817d4c3e8d04b0a0e832acd0219c8ebe Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 12 Aug 2023 18:41:54 +0800 Subject: [PATCH 076/147] refactor: do some internal refactor. 
--- source/libs/stream/src/stream.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 9548c3c327..d73bb1562e 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -253,6 +253,25 @@ static int32_t streamTaskAppendInputBlocks(SStreamTask* pTask, const SStreamDisp return status; } +static int32_t buildDispatchRsp(const SStreamTask* pTask, const SStreamDispatchReq* pReq, int32_t status, void** pBuf) { + *pBuf = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamDispatchRsp)); + if (*pBuf == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + ((SMsgHead*)(*pBuf))->vgId = htonl(pReq->upstreamNodeId); + SStreamDispatchRsp* pDispatchRsp = POINTER_SHIFT((*pBuf), sizeof(SMsgHead)); + + pDispatchRsp->inputStatus = status; + pDispatchRsp->streamId = htobe64(pReq->streamId); + pDispatchRsp->upstreamNodeId = htonl(pReq->upstreamNodeId); + pDispatchRsp->upstreamTaskId = htonl(pReq->upstreamTaskId); + pDispatchRsp->downstreamNodeId = htonl(pTask->info.nodeId); + pDispatchRsp->downstreamTaskId = htonl(pTask->id.taskId); + + return TSDB_CODE_SUCCESS; +} + int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp, bool exec) { qDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64, pTask->id.idStr, pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->totalLen); From 75e6fb0f16627525725f1a55038cb2f4137c98fd Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 12 Aug 2023 18:47:01 +0800 Subject: [PATCH 077/147] refactor: do some internal refactor. 
--- include/libs/stream/tstream.h | 3 ++- source/libs/stream/src/stream.c | 11 ++++++++++- source/libs/stream/src/streamDispatch.c | 2 +- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 8dc0684976..3542788b4b 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -403,7 +403,7 @@ typedef struct { int64_t streamId; int32_t type; int32_t taskId; - int32_t dataSrcVgId; + int32_t srcVgId; int32_t upstreamTaskId; int32_t upstreamChildId; int32_t upstreamNodeId; @@ -582,6 +582,7 @@ int32_t streamSetupScheduleTrigger(SStreamTask* pTask); int32_t streamProcessRunReq(SStreamTask* pTask); int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec); int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code); +void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId); int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg); diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index d73bb1562e..caf20a499c 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -145,7 +145,7 @@ int32_t streamSchedExec(SStreamTask* pTask) { int32_t streamTaskEnqueueBlocks(SStreamTask* pTask, const SStreamDispatchReq* pReq, SRpcMsg* pRsp) { int8_t status = 0; - SStreamDataBlock* pBlock = createStreamDataFromDispatchMsg(pReq, STREAM_INPUT__DATA_BLOCK, pReq->dataSrcVgId); + SStreamDataBlock* pBlock = createStreamDataFromDispatchMsg(pReq, STREAM_INPUT__DATA_BLOCK, pReq->srcVgId); if (pBlock == NULL) { streamTaskInputFail(pTask); status = TASK_INPUT_STATUS__FAILED; @@ -235,6 +235,8 @@ int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock return 0; } + + static int32_t streamTaskAppendInputBlocks(SStreamTask* pTask, const SStreamDispatchReq* pReq) { int8_t status = 0; @@ -272,6 +274,13 
@@ static int32_t buildDispatchRsp(const SStreamTask* pTask, const SStreamDispatchR return TSDB_CODE_SUCCESS; } +void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId) { + SStreamChildEpInfo* pInfo = streamTaskGetUpstreamTaskEpInfo(pTask, taskId); + if (pInfo != NULL) { + pInfo->dataAllowed = false; + } +} + int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pRsp, bool exec) { qDebug("s-task:%s receive dispatch msg from taskId:0x%x(vgId:%d), msgLen:%" PRId64, pTask->id.idStr, pReq->upstreamTaskId, pReq->upstreamNodeId, pReq->totalLen); diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 7eef42e289..a162f0e770 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -36,7 +36,7 @@ static int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatc if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->upstreamTaskId) < 0) return -1; - if (tEncodeI32(pEncoder, pReq->dataSrcVgId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->srcVgId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->upstreamChildId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->upstreamNodeId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->blockNum) < 0) return -1; From 09da6c6840aaec42a3b0b89b7ee60b8bf15a378d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 12 Aug 2023 18:51:20 +0800 Subject: [PATCH 078/147] refactor: do some internal refactor. 
--- include/libs/stream/tstream.h | 1 + source/libs/stream/src/stream.c | 36 +---------------------- source/libs/stream/src/streamData.c | 39 +++++++++++++++++++++++++ source/libs/stream/src/streamDispatch.c | 4 +-- 4 files changed, 43 insertions(+), 37 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 3542788b4b..cee2def85f 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -122,6 +122,7 @@ typedef struct { int8_t type; int32_t srcVgId; + int32_t srcTaskId; int32_t childId; int64_t sourceVer; int64_t reqId; diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index caf20a499c..0eaeafd0b3 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -142,40 +142,6 @@ int32_t streamSchedExec(SStreamTask* pTask) { return 0; } -int32_t streamTaskEnqueueBlocks(SStreamTask* pTask, const SStreamDispatchReq* pReq, SRpcMsg* pRsp) { - int8_t status = 0; - - SStreamDataBlock* pBlock = createStreamDataFromDispatchMsg(pReq, STREAM_INPUT__DATA_BLOCK, pReq->srcVgId); - if (pBlock == NULL) { - streamTaskInputFail(pTask); - status = TASK_INPUT_STATUS__FAILED; - qError("vgId:%d, s-task:%s failed to receive dispatch msg, reason: out of memory", pTask->pMeta->vgId, - pTask->id.idStr); - } else { - int32_t code = tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pBlock); - // input queue is full, upstream is blocked now - status = (code == TSDB_CODE_SUCCESS)? 
TASK_INPUT_STATUS__NORMAL:TASK_INPUT_STATUS__BLOCKED; - } - - // rsp by input status - void* buf = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamDispatchRsp)); - ((SMsgHead*)buf)->vgId = htonl(pReq->upstreamNodeId); - SStreamDispatchRsp* pDispatchRsp = POINTER_SHIFT(buf, sizeof(SMsgHead)); - - pDispatchRsp->inputStatus = status; - pDispatchRsp->streamId = htobe64(pReq->streamId); - pDispatchRsp->upstreamNodeId = htonl(pReq->upstreamNodeId); - pDispatchRsp->upstreamTaskId = htonl(pReq->upstreamTaskId); - pDispatchRsp->downstreamNodeId = htonl(pTask->info.nodeId); - pDispatchRsp->downstreamTaskId = htonl(pTask->id.taskId); - - pRsp->pCont = buf; - pRsp->contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp); - tmsgSendRsp(pRsp); - - return status == TASK_INPUT_STATUS__NORMAL ? 0 : -1; -} - int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pRsp) { SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0); int8_t status = TASK_INPUT_STATUS__NORMAL; @@ -240,7 +206,7 @@ int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock static int32_t streamTaskAppendInputBlocks(SStreamTask* pTask, const SStreamDispatchReq* pReq) { int8_t status = 0; - SStreamDataBlock* pBlock = createStreamBlockFromDispatchMsg(pReq, pReq->type, pReq->srcVgId); + SStreamDataBlock* pBlock = createStreamDataFromDispatchMsg(pReq, pReq->type, pReq->srcVgId); if (pBlock == NULL) { streamTaskInputFail(pTask); status = TASK_INPUT_STATUS__FAILED; diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index bb4b842787..fcc0195bf4 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -15,6 +15,45 @@ #include "streamInt.h" +SStreamDataBlock* createStreamBlockFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg) { + SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, pReq->totalLen); + if 
(pData == NULL) { + return NULL; + } + + pData->type = blockType; + pData->srcVgId = srcVg; + pData->srcTaskId = pReq->upstreamTaskId; + + int32_t blockNum = pReq->blockNum; + SArray* pArray = taosArrayInit_s(sizeof(SSDataBlock), blockNum); + if (pArray == NULL) { + taosFreeQitem(pData); + return NULL; + } + + ASSERT((pReq->blockNum == taosArrayGetSize(pReq->data)) && (pReq->blockNum == taosArrayGetSize(pReq->dataLen))); + + for (int32_t i = 0; i < blockNum; i++) { + SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*) taosArrayGetP(pReq->data, i); + SSDataBlock* pDataBlock = taosArrayGet(pArray, i); + blockDecode(pDataBlock, pRetrieve->data); + + // TODO: refactor + pDataBlock->info.window.skey = be64toh(pRetrieve->skey); + pDataBlock->info.window.ekey = be64toh(pRetrieve->ekey); + pDataBlock->info.version = be64toh(pRetrieve->version); + pDataBlock->info.watermark = be64toh(pRetrieve->watermark); + memcpy(pDataBlock->info.parTbName, pRetrieve->parTbName, TSDB_TABLE_NAME_LEN); + + pDataBlock->info.type = pRetrieve->streamBlockType; + pDataBlock->info.childId = pReq->upstreamChildId; + } + + pData->blocks = pArray; + return pData; +} + SStreamDataBlock* createStreamDataFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg) { SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, pReq->totalLen); if (pData == NULL) { diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index a162f0e770..fab7856cf6 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -89,7 +89,7 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) { if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->upstreamTaskId) < 0) return -1; - if (tDecodeI32(pDecoder, &pReq->dataSrcVgId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->srcVgId) < 
0) return -1; if (tDecodeI32(pDecoder, &pReq->upstreamChildId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->upstreamNodeId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->blockNum) < 0) return -1; @@ -115,7 +115,7 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) { int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTask* pTask, int32_t vgId, int32_t numOfBlocks, int64_t dstTaskId) { pReq->streamId = pTask->id.streamId; - pReq->dataSrcVgId = vgId; + pReq->srcVgId = vgId; pReq->upstreamTaskId = pTask->id.taskId; pReq->upstreamChildId = pTask->info.selfChildId; pReq->upstreamNodeId = pTask->info.nodeId; From afe9b848a58c91b62755baf6b912161a005cdd10 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 12 Aug 2023 18:54:39 +0800 Subject: [PATCH 079/147] fix(tsdb): check the --- include/libs/stream/tstream.h | 2 -- source/libs/stream/src/streamDispatch.c | 10 +++++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index cee2def85f..508392ff77 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -574,8 +574,6 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq); int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq); void tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq); -int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTask* pTask, int32_t vgId, int32_t numOfBlocks, - int64_t dstTaskId); void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq); int32_t streamSetupScheduleTrigger(SStreamTask* pTask); diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index fab7856cf6..b6a03153d3 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -25,6 +25,9 @@ typedef struct SBlockName { char parTbName[TSDB_TABLE_NAME_LEN]; } SBlockName; +static int32_t 
tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTask* pTask, int32_t vgId, + int32_t numOfBlocks, int64_t dstTaskId, int32_t type); + static void initRpcMsg(SRpcMsg* pMsg, int32_t msgType, void* pCont, int32_t contLen) { pMsg->msgType = msgType; pMsg->pCont = pCont; @@ -112,8 +115,8 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) { return 0; } -int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTask* pTask, int32_t vgId, int32_t numOfBlocks, - int64_t dstTaskId) { +int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTask* pTask, int32_t vgId, + int32_t numOfBlocks, int64_t dstTaskId, int32_t type) { pReq->streamId = pTask->id.streamId; pReq->srcVgId = vgId; pReq->upstreamTaskId = pTask->id.taskId; @@ -121,6 +124,7 @@ int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTask* pTas pReq->upstreamNodeId = pTask->info.nodeId; pReq->blockNum = numOfBlocks; pReq->taskId = dstTaskId; + pReq->type = type; pReq->data = taosArrayInit(numOfBlocks, POINTER_BYTES); pReq->dataLen = taosArrayInit(numOfBlocks, sizeof(int32_t)); @@ -446,7 +450,7 @@ int32_t doDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) { SStreamDispatchReq req = {0}; int32_t downstreamTaskId = pTask->fixedEpDispatcher.taskId; - code = tInitStreamDispatchReq(&req, pTask, pData->srcVgId, numOfBlocks, downstreamTaskId); + code = tInitStreamDispatchReq(&req, pTask, pData->srcVgId, numOfBlocks, downstreamTaskId, ); if (code != TSDB_CODE_SUCCESS) { return code; } From 03c26a9d6df9e429f17f0be6b7f4f743e217b791 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 12 Aug 2023 19:16:20 +0800 Subject: [PATCH 080/147] refactor --- include/common/tcommon.h | 1 + source/libs/stream/inc/streamInt.h | 1 - source/libs/stream/src/streamDispatch.c | 20 +++++++++----------- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/include/common/tcommon.h b/include/common/tcommon.h index 
92e64f075f..8482ba8a78 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -170,6 +170,7 @@ typedef enum EStreamType { STREAM_PULL_DATA, STREAM_PULL_OVER, STREAM_FILL_OVER, + STREAM_CHECKPOINT, STREAM_CREATE_CHILD_TABLE, STREAM_TRANS_STATE, } EStreamType; diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h index 30c941d106..b0fed5dde1 100644 --- a/source/libs/stream/inc/streamInt.h +++ b/source/libs/stream/inc/streamInt.h @@ -52,7 +52,6 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock) int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq); -int32_t doDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData); int32_t streamDispatchCheckMsg(SStreamTask* pTask, const SStreamTaskCheckReq* pReq, int32_t nodeId, SEpSet* pEpSet); int32_t streamDoDispatchScanHistoryFinishMsg(SStreamTask* pTask, const SStreamScanHistoryFinishReq* pReq, int32_t vgId, diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index b6a03153d3..41ed784d16 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -440,9 +440,8 @@ int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, S return 0; } -int32_t doDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) { +static int32_t doDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) { int32_t code = 0; - int32_t numOfBlocks = taosArrayGetSize(pData->blocks); ASSERT(numOfBlocks != 0); @@ -450,7 +449,7 @@ int32_t doDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) { SStreamDispatchReq req = {0}; int32_t downstreamTaskId = pTask->fixedEpDispatcher.taskId; - code = tInitStreamDispatchReq(&req, pTask, pData->srcVgId, numOfBlocks, downstreamTaskId, ); + code = tInitStreamDispatchReq(&req, pTask, pData->srcVgId, numOfBlocks, downstreamTaskId, pData->type); if (code != 
TSDB_CODE_SUCCESS) { return code; } @@ -491,7 +490,7 @@ int32_t doDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) { for (int32_t i = 0; i < vgSz; i++) { SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i); - code = tInitStreamDispatchReq(&pReqs[i], pTask, pData->srcVgId, 0, pVgInfo->taskId); + code = tInitStreamDispatchReq(&pReqs[i], pTask, pData->srcVgId, 0, pVgInfo->taskId, pData->type); if (code != TSDB_CODE_SUCCESS) { goto FAIL_SHUFFLE_DISPATCH; } @@ -501,8 +500,7 @@ int32_t doDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) { SSDataBlock* pDataBlock = taosArrayGet(pData->blocks, i); // TODO: do not use broadcast - if (pDataBlock->info.type == STREAM_DELETE_RESULT) { - + if (pDataBlock->info.type == STREAM_DELETE_RESULT || pDataBlock->info.type == STREAM_CHECKPOINT || pDataBlock->info.type == STREAM_TRANS_STATE) { for (int32_t j = 0; j < vgSz; j++) { if (streamAddBlockIntoDispatchMsg(pDataBlock, &pReqs[j]) < 0) { goto FAIL_SHUFFLE_DISPATCH; @@ -522,14 +520,14 @@ int32_t doDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) { } } - qDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to %d vgroups", pTask->id.idStr, pTask->info.selfChildId, - numOfBlocks, vgSz); + qDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to %d vgroups", pTask->id.idStr, + pTask->info.selfChildId, numOfBlocks, vgSz); for (int32_t i = 0; i < vgSz; i++) { if (pReqs[i].blockNum > 0) { SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i); - qDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to vgId:%d", pTask->id.idStr, pTask->info.selfChildId, - pReqs[i].blockNum, pVgInfo->vgId); + qDebug("s-task:%s (child taskId:%d) shuffle-dispatch blocks:%d to vgId:%d", pTask->id.idStr, + pTask->info.selfChildId, pReqs[i].blockNum, pVgInfo->vgId); code = doSendDispatchMsg(pTask, &pReqs[i], pVgInfo->vgId, &pVgInfo->epSet); if (code < 0) { @@ -540,7 +538,7 @@ int32_t doDispatchAllBlocks(SStreamTask* pTask, const 
SStreamDataBlock* pData) { code = 0; - FAIL_SHUFFLE_DISPATCH: + FAIL_SHUFFLE_DISPATCH: for (int32_t i = 0; i < vgSz; i++) { taosArrayDestroyP(pReqs[i].data, taosMemoryFree); taosArrayDestroy(pReqs[i].dataLen); From 0e189f70a103a63e52f83aa2d2e9dd8d8ac205e5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 12 Aug 2023 19:44:44 +0800 Subject: [PATCH 081/147] refactor --- source/libs/stream/src/stream.c | 65 ------------------------ source/libs/stream/src/streamDispatch.c | 66 +++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 65 deletions(-) diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index 0eaeafd0b3..b9c9e40562 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -306,71 +306,6 @@ int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, S // return 0; //} -// todo record the idle time for dispatch data -int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code) { - if (code != TSDB_CODE_SUCCESS) { - // dispatch message failed: network error, or node not available. - // in case of the input queue is full, the code will be TSDB_CODE_SUCCESS, the and pRsp>inputStatus will be set - // flag. here we need to retry dispatch this message to downstream task immediately. handle the case the failure - // happened too fast. 
todo handle the shuffle dispatch failure - if (code == TSDB_CODE_STREAM_TASK_NOT_EXIST) { - qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, no-retry", pTask->id.idStr, - pRsp->downstreamTaskId, tstrerror(code)); - return code; - } else { - qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, retry cnt:%d", pTask->id.idStr, - pRsp->downstreamTaskId, tstrerror(code), ++pTask->msgInfo.retryCount); - return doDispatchAllBlocks(pTask, pTask->msgInfo.pData); - } - } - - qDebug("s-task:%s receive dispatch rsp, output status:%d code:%d", pTask->id.idStr, pRsp->inputStatus, code); - - // there are other dispatch message not response yet - if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { - int32_t leftRsp = atomic_sub_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1); - qDebug("s-task:%s is shuffle, left waiting rsp %d", pTask->id.idStr, leftRsp); - if (leftRsp > 0) { - return 0; - } - } - - pTask->msgInfo.retryCount = 0; - ASSERT(pTask->outputInfo.status == TASK_OUTPUT_STATUS__WAIT); - - qDebug("s-task:%s output status is set to:%d", pTask->id.idStr, pTask->outputInfo.status); - - // the input queue of the (down stream) task that receive the output data is full, - // so the TASK_INPUT_STATUS_BLOCKED is rsp - // todo blocking the output status - if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) { - pTask->msgInfo.blockingTs = taosGetTimestampMs(); // record the blocking start time - - int32_t waitDuration = 300; // 300 ms - qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 "wait for %dms and retry dispatch data", - pTask->id.idStr, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, waitDuration); - streamRetryDispatchStreamBlock(pTask, waitDuration); - } else { // pipeline send data in output queue - // this message has been sent successfully, let's try next one. 
- destroyStreamDataBlock(pTask->msgInfo.pData); - pTask->msgInfo.pData = NULL; - - if (pTask->msgInfo.blockingTs != 0) { - int64_t el = taosGetTimestampMs() - pTask->msgInfo.blockingTs; - qDebug("s-task:%s resume to normal from inputQ blocking, idle time:%"PRId64"ms", pTask->id.idStr, el); - pTask->msgInfo.blockingTs = 0; - } - - // now ready for next data output - atomic_store_8(&pTask->outputInfo.status, TASK_OUTPUT_STATUS__NORMAL); - - // otherwise, continue dispatch the first block to down stream task in pipeline - streamDispatchStreamBlock(pTask); - } - - return 0; -} - int32_t streamProcessRunReq(SStreamTask* pTask) { if (streamTryExec(pTask) < 0) { return -1; diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 41ed784d16..bcd45875fb 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -718,3 +718,69 @@ int32_t streamNotifyUpstreamContinue(SStreamTask* pTask) { num); return 0; } + +int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code) { + if (code != TSDB_CODE_SUCCESS) { + // dispatch message failed: network error, or node not available. + // in case of the input queue is full, the code will be TSDB_CODE_SUCCESS, the and pRsp>inputStatus will be set + // flag. here we need to retry dispatch this message to downstream task immediately. handle the case the failure + // happened too fast. 
+ // todo handle the shuffle dispatch failure + qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, retry cnt:%d", pTask->id.idStr, + pRsp->downstreamTaskId, tstrerror(code), ++pTask->msgInfo.retryCount); + int32_t ret = doDispatchAllBlocks(pTask, pTask->msgInfo.pData); + if (ret != TSDB_CODE_SUCCESS) { + } + + return TSDB_CODE_SUCCESS; + } + + qDebug("s-task:%s recv dispatch rsp, downstream task input status:%d code:%d", pTask->id.idStr, pRsp->inputStatus, + code); + + // there are other dispatch message not response yet + if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { + int32_t leftRsp = atomic_sub_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1); + qDebug("s-task:%s is shuffle, left waiting rsp %d", pTask->id.idStr, leftRsp); + if (leftRsp > 0) { + return 0; + } + } + + pTask->msgInfo.retryCount = 0; + ASSERT(pTask->outputInfo.status == TASK_OUTPUT_STATUS__WAIT); + + qDebug("s-task:%s output status is set to:%d", pTask->id.idStr, pTask->outputInfo.status); + + // the input queue of the (down stream) task that receive the output data is full, + // so the TASK_INPUT_STATUS_BLOCKED is rsp + if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) { + pTask->inputStatus = TASK_INPUT_STATUS__BLOCKED; // block the input of current task, to push pressure to upstream + pTask->msgInfo.blockingTs = taosGetTimestampMs(); // record the blocking start time + qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 "wait for %dms and retry dispatch data", + pTask->id.idStr, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS); + streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS); + } else { // pipeline send data in output queue + // this message has been sent successfully, let's try next one. 
+ destroyStreamDataBlock(pTask->msgInfo.pData); + pTask->msgInfo.pData = NULL; + + if (pTask->msgInfo.blockingTs != 0) { + int64_t el = taosGetTimestampMs() - pTask->msgInfo.blockingTs; + qDebug("s-task:%s downstream task:0x%x resume to normal from inputQ blocking, blocking time:%" PRId64 "ms", + pTask->id.idStr, pRsp->downstreamTaskId, el); + pTask->msgInfo.blockingTs = 0; + + // put data into inputQ of current task is also allowed + pTask->inputStatus = TASK_INPUT_STATUS__NORMAL; + } + + // now ready for next data output + atomic_store_8(&pTask->outputInfo.status, TASK_OUTPUT_STATUS__NORMAL); + + // otherwise, continue dispatch the first block to down stream task in pipeline + streamDispatchStreamBlock(pTask); + } + + return 0; +} From 0d38f389abe7c229cb2ffb7d8c25582874051e6a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 13 Aug 2023 16:25:35 +0800 Subject: [PATCH 082/147] refactor: do some internal refactor. --- include/common/tmsgdef.h | 1 - include/libs/stream/tstream.h | 4 +- source/dnode/mgmt/mgmt_vnode/src/vmHandle.c | 1 - source/dnode/vnode/src/inc/vnodeInt.h | 1 - source/dnode/vnode/src/tq/tq.c | 51 ++++------------ source/dnode/vnode/src/tq/tqRestore.c | 17 ++++-- source/dnode/vnode/src/vnd/vnodeSvr.c | 2 - source/libs/stream/inc/streamInt.h | 1 + source/libs/stream/src/stream.c | 20 ++++++- source/libs/stream/src/streamDispatch.c | 41 +++++++++---- source/libs/stream/src/streamExec.c | 62 ++++++++----------- source/libs/stream/src/streamRecover.c | 66 +-------------------- 12 files changed, 100 insertions(+), 167 deletions(-) diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 232551007d..60172bce3d 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -254,7 +254,6 @@ enum { TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_SCAN_HISTORY, "stream-scan-history", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_SCAN_HISTORY_FINISH, "stream-scan-history-finish", NULL, NULL) - 
TD_DEF_MSG_TYPE(TDMT_STREAM_TRANSFER_STATE, "stream-transfer-state", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECK, "stream-task-check", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECKPOINT, "stream-checkpoint", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_REPORT_CHECKPOINT, "stream-report-checkpoint", NULL, NULL) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 508392ff77..d3b670d0ec 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -274,7 +274,7 @@ typedef struct SStreamStatus { int8_t schedStatus; int8_t keepTaskStatus; bool transferState; - bool appendTranstateBlock; // has append the transfer state data block already + bool appendTranstateBlock; // has append the transfer state data block already, todo: remove it int8_t timerActive; // timer is active int8_t pauseAllowed; // allowed task status to be set to be paused } SStreamStatus; @@ -582,6 +582,7 @@ int32_t streamProcessRunReq(SStreamTask* pTask); int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec); int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code); void streamTaskCloseUpstreamInput(SStreamTask* pTask, int32_t taskId); +void streamTaskOpenAllUpstreamInput(SStreamTask* pTask); int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg); @@ -629,7 +630,6 @@ int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* p int32_t streamSourceScanHistoryData(SStreamTask* pTask); int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask); -int32_t streamDispatchTransferStateMsg(SStreamTask* pTask); int32_t appendTranstateIntoInputQ(SStreamTask* pTask); // agg level diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index bed9a67303..cf57deaa22 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ 
b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -742,7 +742,6 @@ SArray *vmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_STREAM_SCAN_HISTORY_FINISH, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_STREAM_SCAN_HISTORY_FINISH_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; - if (dmSetMgmtHandle(pArray, TDMT_STREAM_TRANSFER_STATE, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECK, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECK_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index cd7704940b..d41c58b501 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -250,7 +250,6 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg); -int32_t tqProcessTaskTransferStateReq(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskScanHistoryFinishReq(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskScanHistoryFinishRsp(STQ* pTq, SRpcMsg* pMsg); int32_t tqCheckLogInWal(STQ* pTq, int64_t version); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index ad1af080fd..9dfde0fed7 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -928,6 +928,8 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) { pTask->pMsgCb = &pTq->pVnode->msgCb; pTask->pMeta = pTq->pStreamMeta; + streamTaskOpenAllUpstreamInput(pTask); + // backup the initial status, and set it to be TASK_STATUS__INIT 
pTask->chkInfo.version = ver; pTask->chkInfo.currentVer = ver; @@ -1272,7 +1274,8 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { if (done) { pTask->tsInfo.step2Start = taosGetTimestampMs(); - streamTaskEndScanWAL(pTask); + qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, 0.0); + appendTranstateIntoInputQ(pTask); } else { STimeWindow* pWindow = &pTask->dataRange.window; tqDebug("s-task:%s level:%d verRange:%" PRId64 " - %" PRId64 " window:%" PRId64 "-%" PRId64 @@ -1337,44 +1340,6 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { return 0; } -// notify the downstream tasks to transfer executor state after handle all history blocks. -int32_t tqProcessTaskTransferStateReq(STQ* pTq, SRpcMsg* pMsg) { - char* pReq = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); - int32_t len = pMsg->contLen - sizeof(SMsgHead); - - SStreamTransferReq req = {0}; - - SDecoder decoder; - tDecoderInit(&decoder, (uint8_t*)pReq, len); - int32_t code = tDecodeStreamScanHistoryFinishReq(&decoder, &req); - tDecoderClear(&decoder); - - tqDebug("vgId:%d start to process transfer state msg, from s-task:0x%x", pTq->pStreamMeta->vgId, req.downstreamTaskId); - - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.downstreamTaskId); - if (pTask == NULL) { - tqError("failed to find task:0x%x, it may have been dropped already. 
process transfer state failed", req.downstreamTaskId); - return -1; - } - - int32_t remain = streamAlignTransferState(pTask); - if (remain > 0) { - tqDebug("s-task:%s receive upstream transfer state msg, remain:%d", pTask->id.idStr, remain); - streamMetaReleaseTask(pTq->pStreamMeta, pTask); - return 0; - } - - // transfer the ownership of executor state - tqDebug("s-task:%s all upstream tasks send transfer msg, open transfer state flag", pTask->id.idStr); - ASSERT(pTask->streamTaskId.taskId != 0 && pTask->info.fillHistory == 1); - - pTask->status.transferState = true; - - streamSchedExec(pTask); - streamMetaReleaseTask(pTq->pStreamMeta, pTask); - return 0; -} - int32_t tqProcessTaskScanHistoryFinishReq(STQ* pTq, SRpcMsg* pMsg) { char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); @@ -1706,6 +1671,8 @@ int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg) { int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) { STQ* pTq = pVnode->pTq; + int32_t vgId = pVnode->config.vgId; + SMsgHead* msgStr = pMsg->pCont; char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead)); int32_t msgLen = pMsg->contLen - sizeof(SMsgHead); @@ -1722,7 +1689,9 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) { tDecoderClear(&decoder); int32_t taskId = req.taskId; - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.taskId); + tqDebug("vgId:%d receive dispatch msg to s-task:0x%"PRIx64"-0x%x", vgId, req.streamId, taskId); + + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, taskId); if (pTask != NULL) { SRpcMsg rsp = {.info = pMsg->info, .code = 0}; streamProcessDispatchMsg(pTask, &req, &rsp, false); @@ -1739,7 +1708,7 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) { FAIL: if (pMsg->info.handle == NULL) { - tqError("s-task:0x%x vgId:%d msg handle is null, abort enqueue dispatch msg", pTq->pStreamMeta->vgId, taskId); + tqError("s-task:0x%x vgId:%d 
msg handle is null, abort enqueue dispatch msg", vgId, taskId); return -1; } diff --git a/source/dnode/vnode/src/tq/tqRestore.c b/source/dnode/vnode/src/tq/tqRestore.c index a217bc2966..3054179416 100644 --- a/source/dnode/vnode/src/tq/tqRestore.c +++ b/source/dnode/vnode/src/tq/tqRestore.c @@ -210,14 +210,21 @@ int32_t doSetOffsetForWalReader(SStreamTask *pTask, int32_t vgId) { } static void checkForFillHistoryVerRange(SStreamTask* pTask, int64_t ver) { + const char* id = pTask->id.idStr; + if ((pTask->info.fillHistory == 1) && ver > pTask->dataRange.range.maxVer) { - qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64 - ", not scan wal anymore, set the transfer state flag", - pTask->id.idStr, ver, pTask->dataRange.range.maxVer); if (!pTask->status.appendTranstateBlock) { - pTask->status.appendTranstateBlock = true; + qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64 + ", not scan wal anymore, add transfer-state block into inputQ", + id, ver, pTask->dataRange.range.maxVer); + + double el = (taosGetTimestampMs() - pTask->tsInfo.step2Start) / 1000.0; + qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, el); appendTranstateIntoInputQ(pTask); /*int32_t code = */streamSchedExec(pTask); + } else { + qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64 ", not scan wal", + id, ver, pTask->dataRange.range.maxVer); } } } @@ -264,7 +271,7 @@ int32_t createStreamTaskRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) { continue; } - if ((pTask->info.fillHistory == 1) && pTask->status.transferState) { + if ((pTask->info.fillHistory == 1) && pTask->status.appendTranstateBlock) { ASSERT(status == TASK_STATUS__NORMAL); // the maximum version of data in the WAL has reached already, the step2 is done tqDebug("s-task:%s fill-history reach the maximum ver:%" PRId64 ", not scan wal anymore", pTask->id.idStr, diff --git 
a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index bf1abf5795..70c3382dc5 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -660,8 +660,6 @@ int32_t vnodeProcessStreamMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) return tqProcessTaskRetrieveRsp(pVnode->pTq, pMsg); case TDMT_VND_STREAM_SCAN_HISTORY: return tqProcessTaskScanHistory(pVnode->pTq, pMsg); - case TDMT_STREAM_TRANSFER_STATE: - return tqProcessTaskTransferStateReq(pVnode->pTq, pMsg); case TDMT_STREAM_SCAN_HISTORY_FINISH: return tqProcessTaskScanHistoryFinishReq(pVnode->pTq, pMsg); case TDMT_STREAM_SCAN_HISTORY_FINISH_RSP: diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h index b0fed5dde1..7a557a744a 100644 --- a/source/libs/stream/inc/streamInt.h +++ b/source/libs/stream/inc/streamInt.h @@ -62,6 +62,7 @@ SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* int32_t streamAddEndScanHistoryMsg(SStreamTask* pTask, SRpcHandleInfo* pRpcInfo, SStreamScanHistoryFinishReq* pReq); int32_t streamNotifyUpstreamContinue(SStreamTask* pTask); int32_t streamTaskFillHistoryFinished(SStreamTask* pTask); +int32_t streamTransferStateToStreamTask(SStreamTask* pTask); extern int32_t streamBackendId; extern int32_t streamBackendCfWrapperId; diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index b9c9e40562..e9b38dfff2 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -213,6 +213,10 @@ static int32_t streamTaskAppendInputBlocks(SStreamTask* pTask, const SStreamDisp qError("vgId:%d, s-task:%s failed to receive dispatch msg, reason: out of memory", pTask->pMeta->vgId, pTask->id.idStr); } else { + if (pBlock->type == STREAM_INPUT__TRANS_STATE) { + pTask->status.appendTranstateBlock = true; + } + int32_t code = tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pBlock); // input queue is full, upstream is blocked now 
status = (code == TSDB_CODE_SUCCESS) ? TASK_INPUT_STATUS__NORMAL : TASK_INPUT_STATUS__BLOCKED; @@ -379,6 +383,8 @@ int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem) { // use the default memory limit, refactor later. taosWriteQitem(pTask->inputQueue->queue, pItem); qDebug("s-task:%s data res enqueue, current(blocks:%d, size:%.2fMiB)", pTask->id.idStr, total, size); + } else { + ASSERT(0); } if (type != STREAM_INPUT__GET_RES && type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) { @@ -421,4 +427,16 @@ SStreamChildEpInfo * streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t } return NULL; -} \ No newline at end of file +} + +void streamTaskOpenAllUpstreamInput(SStreamTask* pTask) { + int32_t num = taosArrayGetSize(pTask->pUpstreamEpInfoList); + if (num == 0) { + return; + } + + for(int32_t i = 0; i < num; ++i) { + SStreamChildEpInfo* pInfo = taosArrayGetP(pTask->pUpstreamEpInfoList, i); + pInfo->dataAllowed = true; + } +} diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index bcd45875fb..d479dd44df 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -38,6 +38,7 @@ static int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatc if (tStartEncode(pEncoder) < 0) return -1; if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1; + if (tEncodeI32(pEncoder, pReq->type) < 0) return -1; if (tEncodeI32(pEncoder, pReq->upstreamTaskId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->srcVgId) < 0) return -1; if (tEncodeI32(pEncoder, pReq->upstreamChildId) < 0) return -1; @@ -91,6 +92,7 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) { if (tStartDecode(pDecoder) < 0) return -1; if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1; + if (tDecodeI32(pDecoder, &pReq->type) < 0) 
return -1; if (tDecodeI32(pDecoder, &pReq->upstreamTaskId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->srcVgId) < 0) return -1; if (tDecodeI32(pDecoder, &pReq->upstreamChildId) < 0) return -1; @@ -115,8 +117,8 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) { return 0; } -int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTask* pTask, int32_t vgId, - int32_t numOfBlocks, int64_t dstTaskId, int32_t type) { +int32_t tInitStreamDispatchReq(SStreamDispatchReq* pReq, const SStreamTask* pTask, int32_t vgId, int32_t numOfBlocks, + int64_t dstTaskId, int32_t type) { pReq->streamId = pTask->id.streamId; pReq->srcVgId = vgId; pReq->upstreamTaskId = pTask->id.taskId; @@ -456,8 +458,8 @@ static int32_t doDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* p for (int32_t i = 0; i < numOfBlocks; i++) { SSDataBlock* pDataBlock = taosArrayGet(pData->blocks, i); - code = streamAddBlockIntoDispatchMsg(pDataBlock, &req); + code = streamAddBlockIntoDispatchMsg(pDataBlock, &req); if (code != TSDB_CODE_SUCCESS) { taosArrayDestroyP(req.data, taosMemoryFree); taosArrayDestroy(req.dataLen); @@ -720,14 +722,16 @@ int32_t streamNotifyUpstreamContinue(SStreamTask* pTask) { } int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code) { + const char* id = pTask->id.idStr; + if (code != TSDB_CODE_SUCCESS) { // dispatch message failed: network error, or node not available. // in case of the input queue is full, the code will be TSDB_CODE_SUCCESS, the and pRsp>inputStatus will be set // flag. here we need to retry dispatch this message to downstream task immediately. handle the case the failure // happened too fast. 
// todo handle the shuffle dispatch failure - qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, retry cnt:%d", pTask->id.idStr, - pRsp->downstreamTaskId, tstrerror(code), ++pTask->msgInfo.retryCount); + qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, retry cnt:%d", id, pRsp->downstreamTaskId, + tstrerror(code), ++pTask->msgInfo.retryCount); int32_t ret = doDispatchAllBlocks(pTask, pTask->msgInfo.pData); if (ret != TSDB_CODE_SUCCESS) { } @@ -735,22 +739,35 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i return TSDB_CODE_SUCCESS; } - qDebug("s-task:%s recv dispatch rsp, downstream task input status:%d code:%d", pTask->id.idStr, pRsp->inputStatus, - code); + qDebug("s-task:%s recv dispatch rsp from 0x%x, downstream task input status:%d code:%d", id, pRsp->downstreamTaskId, + pRsp->inputStatus, code); // there are other dispatch message not response yet if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { int32_t leftRsp = atomic_sub_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1); - qDebug("s-task:%s is shuffle, left waiting rsp %d", pTask->id.idStr, leftRsp); + qDebug("s-task:%s is shuffle, left waiting rsp %d", id, leftRsp); if (leftRsp > 0) { return 0; } } + // transtate msg has been sent to downstream successfully. 
let's transfer the fill-history task state + SStreamDataBlock* p = pTask->msgInfo.pData; + if (p->type == STREAM_INPUT__TRANS_STATE) { + qDebug("s-task:%s dispatch transtate msg to downstream successfully, start to transfer state", id); + ASSERT(pTask->info.fillHistory == 1); + code = streamTransferStateToStreamTask(pTask); + + if (code != TSDB_CODE_SUCCESS) { + atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); + return code; + } + } + pTask->msgInfo.retryCount = 0; ASSERT(pTask->outputInfo.status == TASK_OUTPUT_STATUS__WAIT); - qDebug("s-task:%s output status is set to:%d", pTask->id.idStr, pTask->outputInfo.status); + qDebug("s-task:%s output status is set to:%d", id, pTask->outputInfo.status); // the input queue of the (down stream) task that receive the output data is full, // so the TASK_INPUT_STATUS_BLOCKED is rsp @@ -758,7 +775,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i pTask->inputStatus = TASK_INPUT_STATUS__BLOCKED; // block the input of current task, to push pressure to upstream pTask->msgInfo.blockingTs = taosGetTimestampMs(); // record the blocking start time qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 "wait for %dms and retry dispatch data", - pTask->id.idStr, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS); + id, pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS); streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS); } else { // pipeline send data in output queue // this message has been sent successfully, let's try next one. 
@@ -767,8 +784,8 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i if (pTask->msgInfo.blockingTs != 0) { int64_t el = taosGetTimestampMs() - pTask->msgInfo.blockingTs; - qDebug("s-task:%s downstream task:0x%x resume to normal from inputQ blocking, blocking time:%" PRId64 "ms", - pTask->id.idStr, pRsp->downstreamTaskId, el); + qDebug("s-task:%s downstream task:0x%x resume to normal from inputQ blocking, blocking time:%" PRId64 "ms", id, + pRsp->downstreamTaskId, el); pTask->msgInfo.blockingTs = 0; // put data into inputQ of current task is also allowed diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index bbfaa8cb9d..269334f54d 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -287,7 +287,7 @@ static void waitForTaskIdle(SStreamTask* pTask, SStreamTask* pStreamTask) { } } -static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { +int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { SStreamMeta* pMeta = pTask->pMeta; SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId); @@ -301,7 +301,7 @@ static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { pStreamTask->id.idStr); } - ASSERT(pStreamTask->historyTaskId.taskId == pTask->id.taskId && pTask->status.transferState == true); + ASSERT(pStreamTask->historyTaskId.taskId == pTask->id.taskId && pTask->status.appendTranstateBlock == true); STimeWindow* pTimeWindow = &pStreamTask->dataRange.window; @@ -383,11 +383,9 @@ static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { return TSDB_CODE_SUCCESS; } -static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) { +int32_t streamTransferStateToStreamTask(SStreamTask* pTask) { int32_t code = TSDB_CODE_SUCCESS; - if (!pTask->status.transferState) { - return code; - } + ASSERT(pTask->status.appendTranstateBlock == 1); int32_t level = 
pTask->info.taskLevel; if (level == TASK_LEVEL__SOURCE) { @@ -513,14 +511,12 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock // transfer the ownership of executor state if (level == TASK_LEVEL__SOURCE) { - qDebug("s-task:%s open transfer state flag for source task", id); + qDebug("s-task:%s add transfer-state block into outputQ", id); } else { - qDebug("s-task:%s all upstream tasks send transfer msg, open transfer state flag", id); + qDebug("s-task:%s all upstream tasks send transfer-state block, add transfer-state block into outputQ", id); ASSERT(pTask->streamTaskId.taskId != 0 && pTask->info.fillHistory == 1); } - pTask->status.transferState = true; - // dispatch the tran-state block to downstream task immediately int32_t type = pTask->outputInfo.type; if ((level == TASK_LEVEL__AGG || level == TASK_LEVEL__SOURCE) && @@ -639,16 +635,7 @@ int32_t streamTaskEndScanWAL(SStreamTask* pTask) { qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, el); // 1. notify all downstream tasks to transfer executor state after handle all history blocks. -// pTask->status.transferState = true; appendTranstateIntoInputQ(pTask); - - // 2. do transfer stream task operator states. 
- // todo remove this -// int32_t code = streamDoTransferStateToStreamTask(pTask); -// if (code != TSDB_CODE_SUCCESS) { // todo handle error -// return code; -// } - return TSDB_CODE_SUCCESS; } @@ -667,35 +654,36 @@ int32_t streamTryExec(SStreamTask* pTask) { } // todo the task should be commit here - if (taosQueueEmpty(pTask->inputQueue->queue)) { +// if (taosQueueEmpty(pTask->inputQueue->queue)) { // fill-history WAL scan has completed - if (pTask->status.transferState) { - code = streamTransferStateToStreamTask(pTask); - if (code != TSDB_CODE_SUCCESS) { - atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); - return code; - } +// if (pTask->status.transferState) { +// code = streamTransferStateToStreamTask(pTask); +// if (code != TSDB_CODE_SUCCESS) { +// atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); +// return code; +// } // the schedStatus == TASK_SCHED_STATUS__ACTIVE, streamSchedExec cannot be executed, so execute once again by // call this function (streamExecForAll) directly. 
-// code = streamExecForAll(pTask); -// if (code < 0) { - // do nothing -// } - } + // code = streamExecForAll(pTask); + // if (code < 0) { + // do nothing + // } +// } - atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); - qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, - streamGetTaskStatusStr(pTask->status.taskStatus), pTask->status.schedStatus); - } else { +// atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); +// qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, +// streamGetTaskStatusStr(pTask->status.taskStatus), pTask->status.schedStatus); +// } else { atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus), pTask->status.schedStatus); - if ((!streamTaskShouldStop(&pTask->status)) && (!streamTaskShouldPause(&pTask->status))) { + if (!(taosQueueEmpty(pTask->inputQueue->queue) || streamTaskShouldStop(&pTask->status) || + streamTaskShouldPause(&pTask->status))) { streamSchedExec(pTask); } - } +// } } else { qDebug("s-task:%s already started to exec by other thread, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus), pTask->status.schedStatus); diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 708524bf10..72dae735e1 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -372,49 +372,6 @@ int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask) { return 0; } -static int32_t doDispatchTransferMsg(SStreamTask* pTask, const SStreamTransferReq* pReq, int32_t vgId, SEpSet* pEpSet) { - void* buf = NULL; - int32_t code = -1; - SRpcMsg msg = {0}; - - int32_t tlen; - tEncodeSize(tEncodeStreamScanHistoryFinishReq, pReq, tlen, code); - if (code < 0) { - return -1; - } - - buf = rpcMallocCont(sizeof(SMsgHead) + tlen); - if (buf == NULL) 
{ - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } - - ((SMsgHead*)buf)->vgId = htonl(vgId); - void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead)); - - SEncoder encoder; - tEncoderInit(&encoder, abuf, tlen); - if ((code = tEncodeStreamScanHistoryFinishReq(&encoder, pReq)) < 0) { - if (buf) { - rpcFreeCont(buf); - } - return code; - } - - tEncoderClear(&encoder); - - msg.contLen = tlen + sizeof(SMsgHead); - msg.pCont = buf; - msg.msgType = TDMT_STREAM_TRANSFER_STATE; - msg.info.noResp = 1; - - tmsgSendReq(pEpSet, &msg); - qDebug("s-task:%s level:%d, status:%s dispatch transfer state msg to taskId:0x%x (vgId:%d)", pTask->id.idStr, - pTask->info.taskLevel, streamGetTaskStatusStr(pTask->status.taskStatus), pReq->downstreamTaskId, vgId); - - return 0; -} - int32_t appendTranstateIntoInputQ(SStreamTask* pTask) { SStreamDataBlock* pTranstate = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, sizeof(SSDataBlock)); if (pTranstate == NULL) { @@ -442,6 +399,8 @@ int32_t appendTranstateIntoInputQ(SStreamTask* pTask) { return TSDB_CODE_OUT_OF_MEMORY; } + pTask->status.appendTranstateBlock = true; + qDebug("s-task:%s set sched-status:%d, prev:%d", pTask->id.idStr, TASK_SCHED_STATUS__INACTIVE, pTask->status.schedStatus); pTask->status.schedStatus = TASK_SCHED_STATUS__INACTIVE; streamSchedExec(pTask); @@ -449,27 +408,6 @@ int32_t appendTranstateIntoInputQ(SStreamTask* pTask) { return TSDB_CODE_SUCCESS; } -int32_t streamDispatchTransferStateMsg(SStreamTask* pTask) { - SStreamTransferReq req = { .streamId = pTask->id.streamId, .childId = pTask->info.selfChildId }; - - // serialize - if (pTask->outputInfo.type == TASK_OUTPUT__FIXED_DISPATCH) { - req.downstreamTaskId = pTask->fixedEpDispatcher.taskId; - doDispatchTransferMsg(pTask, &req, pTask->fixedEpDispatcher.nodeId, &pTask->fixedEpDispatcher.epSet); - } else if (pTask->outputInfo.type == TASK_OUTPUT__SHUFFLE_DISPATCH) { - SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos; - - int32_t numOfVgs = 
taosArrayGetSize(vgInfo); - for (int32_t i = 0; i < numOfVgs; i++) { - SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i); - req.downstreamTaskId = pVgInfo->taskId; - doDispatchTransferMsg(pTask, &req, pVgInfo->vgId, &pVgInfo->epSet); - } - } - - return 0; -} - // agg int32_t streamTaskScanHistoryPrepare(SStreamTask* pTask) { pTask->numOfWaitingUpstream = taosArrayGetSize(pTask->pUpstreamEpInfoList); From a81cc9aebfead3f455e2b5c251ab59ae0934b8dd Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 13 Aug 2023 17:31:09 +0800 Subject: [PATCH 083/147] fix(stream): fix the error when no agg tasks exist. --- source/libs/stream/src/streamExec.c | 40 ++++++++++++++++++----------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 269334f54d..fa3f149a43 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -509,24 +509,34 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock } } - // transfer the ownership of executor state - if (level == TASK_LEVEL__SOURCE) { - qDebug("s-task:%s add transfer-state block into outputQ", id); - } else { - qDebug("s-task:%s all upstream tasks send transfer-state block, add transfer-state block into outputQ", id); - ASSERT(pTask->streamTaskId.taskId != 0 && pTask->info.fillHistory == 1); - } - // dispatch the tran-state block to downstream task immediately int32_t type = pTask->outputInfo.type; - if ((level == TASK_LEVEL__AGG || level == TASK_LEVEL__SOURCE) && - (type == TASK_OUTPUT__FIXED_DISPATCH || type == TASK_OUTPUT__SHUFFLE_DISPATCH)) { - pBlock->srcVgId = pTask->pMeta->vgId; - code = taosWriteQitem(pTask->outputInfo.queue->queue, pBlock); - if (code == 0) { - streamDispatchStreamBlock(pTask); + + // transfer the ownership of executor state + if (type == TASK_OUTPUT__FIXED_DISPATCH || type == TASK_OUTPUT__SHUFFLE_DISPATCH) { + if (level == TASK_LEVEL__SOURCE) { + qDebug("s-task:%s 
add transfer-state block into outputQ", id); } else { - streamFreeQitem((SStreamQueueItem*)pBlock); + qDebug("s-task:%s all upstream tasks send transfer-state block, add transfer-state block into outputQ", id); + ASSERT(pTask->streamTaskId.taskId != 0 && pTask->info.fillHistory == 1); + } + + if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SOURCE) { + pBlock->srcVgId = pTask->pMeta->vgId; + code = taosWriteQitem(pTask->outputInfo.queue->queue, pBlock); + if (code == 0) { + streamDispatchStreamBlock(pTask); + } else { + streamFreeQitem((SStreamQueueItem*)pBlock); + } + } + } else { // non-dispatch task, do task state transfer directly + qDebug("s-task:%s non-dispatch task, start to transfer state directly", id); + ASSERT(pTask->info.fillHistory == 1); + code = streamTransferStateToStreamTask(pTask); + + if (code != TSDB_CODE_SUCCESS) { + atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); } } From 72e509a46c8832ade6b2986bb8b09efd227ddad0 Mon Sep 17 00:00:00 2001 From: Alex Duan <51781608+DuanKuanJun@users.noreply.github.com> Date: Sun, 13 Aug 2023 17:39:24 +0800 Subject: [PATCH 084/147] Update 08-taos-shell.md --- docs/zh/14-reference/08-taos-shell.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/zh/14-reference/08-taos-shell.md b/docs/zh/14-reference/08-taos-shell.md index 3423cf35bb..1caa580b73 100644 --- a/docs/zh/14-reference/08-taos-shell.md +++ b/docs/zh/14-reference/08-taos-shell.md @@ -89,3 +89,11 @@ taos -h h1.taos.com -s "use db; show tables;" - 执行 `RESET QUERY CACHE` 可清除本地表 Schema 的缓存 - 批量执行 SQL 语句。可以将一系列的 TDengine CLI 命令(以英文 ; 结尾,每个 SQL 语句为一行)按行存放在文件里,在 TDengine CLI 里执行命令 `source ` 自动执行该文件里所有的 SQL 语句 - 输入 `q` 或 `quit` 或 `exit` 回车,可以退出 TDengine CLI + +## TDengine CLI 导出查询结果到文件中 + +- 可以使用符号 “>>” 导出查询结果到某个文件中,语法为: sql 查询语句 >> ‘输出文件名’; 输出文件如果不写路径的话,将输出至当前目录下。如 select * from d0 >> ‘/root/d0.csv’; 将把查询结果输出到 /root/d0.csv 中。 + +## TDengine CLI 导入文件中的数据到表中 + +- 可以使用 insert into table_name file 
'输入文件名',把上一步中导出的数据文件再导入到指定表中。如 insert into d0 file '/root/d0.csv'; 表示把上面导出的数据全部再导致至 d0 表中。 From 6688d70ba41400c83e9686e0ca6565dc861b1327 Mon Sep 17 00:00:00 2001 From: slzhou Date: Sun, 13 Aug 2023 18:46:55 +0800 Subject: [PATCH 085/147] fix: fix planner test error --- source/libs/nodes/src/nodesCodeFuncs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 3540f8cb70..a2de0bc63a 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -6654,7 +6654,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) { case QUERY_NODE_LOGIC_PLAN: return logicPlanToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: - return physiTableScanNodeToJson(pObj, pJson); + return physiTagScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN: return physiScanNodeToJson(pObj, pJson); case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: From 98f40325e9147fad41823ed32a2188cd707e11eb Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 13 Aug 2023 20:02:15 +0800 Subject: [PATCH 086/147] fix(stream): fix memory leak. 
--- source/dnode/snode/src/snode.c | 2 ++ source/libs/stream/src/streamData.c | 41 +------------------------ source/libs/stream/src/streamDispatch.c | 2 +- source/libs/stream/src/streamExec.c | 3 ++ 4 files changed, 7 insertions(+), 41 deletions(-) diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index 4000e72835..635fdcf459 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -77,6 +77,8 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) { pTask->chkInfo.version = ver; pTask->pMeta = pSnode->pMeta; + streamTaskOpenAllUpstreamInput(pTask); + pTask->pState = streamStateOpen(pSnode->path, pTask, false, -1, -1); if (pTask->pState == NULL) { return -1; diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c index fcc0195bf4..fc1b788b77 100644 --- a/source/libs/stream/src/streamData.c +++ b/source/libs/stream/src/streamData.c @@ -15,45 +15,6 @@ #include "streamInt.h" -SStreamDataBlock* createStreamBlockFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg) { - SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, pReq->totalLen); - if (pData == NULL) { - return NULL; - } - - pData->type = blockType; - pData->srcVgId = srcVg; - pData->srcTaskId = pReq->upstreamTaskId; - - int32_t blockNum = pReq->blockNum; - SArray* pArray = taosArrayInit_s(sizeof(SSDataBlock), blockNum); - if (pArray == NULL) { - taosFreeQitem(pData); - return NULL; - } - - ASSERT((pReq->blockNum == taosArrayGetSize(pReq->data)) && (pReq->blockNum == taosArrayGetSize(pReq->dataLen))); - - for (int32_t i = 0; i < blockNum; i++) { - SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*) taosArrayGetP(pReq->data, i); - SSDataBlock* pDataBlock = taosArrayGet(pArray, i); - blockDecode(pDataBlock, pRetrieve->data); - - // TODO: refactor - pDataBlock->info.window.skey = be64toh(pRetrieve->skey); - pDataBlock->info.window.ekey = 
be64toh(pRetrieve->ekey); - pDataBlock->info.version = be64toh(pRetrieve->version); - pDataBlock->info.watermark = be64toh(pRetrieve->watermark); - memcpy(pDataBlock->info.parTbName, pRetrieve->parTbName, TSDB_TABLE_NAME_LEN); - - pDataBlock->info.type = pRetrieve->streamBlockType; - pDataBlock->info.childId = pReq->upstreamChildId; - } - - pData->blocks = pArray; - return pData; -} - SStreamDataBlock* createStreamDataFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg) { SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, pReq->totalLen); if (pData == NULL) { @@ -243,7 +204,7 @@ void streamFreeQitem(SStreamQueueItem* data) { if (type == STREAM_INPUT__GET_RES) { blockDataDestroy(((SStreamTrigger*)data)->pBlock); taosFreeQitem(data); - } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE) { + } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE || type == STREAM_INPUT__TRANS_STATE) { taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)blockDataFreeRes); taosFreeQitem(data); } else if (type == STREAM_INPUT__DATA_SUBMIT) { diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index d479dd44df..94e005b790 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -774,7 +774,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) { pTask->inputStatus = TASK_INPUT_STATUS__BLOCKED; // block the input of current task, to push pressure to upstream pTask->msgInfo.blockingTs = taosGetTimestampMs(); // record the blocking start time - qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 "wait for %dms and retry dispatch data", + qError("s-task:%s inputQ of downstream task:0x%x is full, time:%" PRId64 " wait for %dms and retry dispatch data", id, 
pRsp->downstreamTaskId, pTask->msgInfo.blockingTs, DISPATCH_RETRY_INTERVAL_MS); streamRetryDispatchStreamBlock(pTask, DISPATCH_RETRY_INTERVAL_MS); } else { // pipeline send data in output queue diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index fa3f149a43..3b954793de 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -504,6 +504,7 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SINK) { int32_t remain = streamAlignTransferState(pTask); if (remain > 0) { + streamFreeQitem((SStreamQueueItem*)pBlock); qDebug("s-task:%s receive upstream transfer state msg, remain:%d", id, remain); return 0; } @@ -532,6 +533,8 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock } } else { // non-dispatch task, do task state transfer directly qDebug("s-task:%s non-dispatch task, start to transfer state directly", id); + + streamFreeQitem((SStreamQueueItem*)pBlock); ASSERT(pTask->info.fillHistory == 1); code = streamTransferStateToStreamTask(pTask); From dd0bc0e0500d758a4ac99523f5afeeda021779fb Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 14 Aug 2023 09:42:55 +0800 Subject: [PATCH 087/147] fix:heap use after free --- include/libs/wal/wal.h | 1 - source/dnode/vnode/src/tq/tqRead.c | 3 +++ source/dnode/vnode/src/tq/tqUtil.c | 1 - source/libs/wal/src/walRead.c | 1 - 4 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index 1f7323a06a..cfe70a186c 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -206,7 +206,6 @@ void walReaderValidVersionRange(SWalReader *pReader, int64_t *sver, int64 void walReaderVerifyOffset(SWalReader *pWalReader, STqOffsetVal* pOffset); // only for tq usage -void walSetReaderCapacity(SWalReader *pRead, int32_t capacity); int32_t walFetchHead(SWalReader *pRead, int64_t ver); int32_t 
walFetchBody(SWalReader *pRead); int32_t walSkipFetchBody(SWalReader *pRead); diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 6c091fa4cb..252a0642fa 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -219,11 +219,13 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t goto END; } + pHead = &(pHandle->pWalReader->pHead->head); if (isValValidForTable(pHandle, pHead)) { code = 0; goto END; } else { offset++; + code = -1; continue; } } @@ -234,6 +236,7 @@ int32_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, uint64_t } offset++; } + code = -1; } END: diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index 42aac52c63..b7fd505784 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -216,7 +216,6 @@ static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, walReaderVerifyOffset(pHandle->pWalReader, offset); int64_t fetchVer = offset->version; - walSetReaderCapacity(pHandle->pWalReader, 2048); int totalRows = 0; while (1) { int32_t savedEpoch = atomic_load_32(&pHandle->epoch); diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 01404494e3..d9e43e4324 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -250,7 +250,6 @@ int32_t walReaderSeekVer(SWalReader *pReader, int64_t ver) { return 0; } -void walSetReaderCapacity(SWalReader *pRead, int32_t capacity) { pRead->capacity = capacity; } int32_t walFetchHead(SWalReader *pRead, int64_t ver) { int64_t code; From 0aa35987ab91f1e41a8799eaee2570817dc270e6 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 14 Aug 2023 09:45:04 +0800 Subject: [PATCH 088/147] Update 5dnode3mnodeRoll.py --- tests/system-test/6-cluster/5dnode3mnodeRoll.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/6-cluster/5dnode3mnodeRoll.py 
b/tests/system-test/6-cluster/5dnode3mnodeRoll.py index 38ac47f777..9d62eb3b4b 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRoll.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRoll.py @@ -27,7 +27,7 @@ import threading import time import json -BASEVERSION = "3.1.0.0" +BASEVERSION = "3.1.1.0" class TDTestCase: From 2806fe1c563b13a554147b9c6b21f219a1892fde Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 14 Aug 2023 10:58:24 +0800 Subject: [PATCH 089/147] fix(stream): ignore the related stream task destory msg in transfer state. --- source/dnode/vnode/src/tq/tq.c | 2 +- source/libs/stream/src/streamDispatch.c | 6 ++--- source/libs/stream/src/streamExec.c | 32 ++++++++++++++----------- 3 files changed, 22 insertions(+), 18 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 9dfde0fed7..4b666ec54a 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1527,7 +1527,7 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) { if (pTask) { streamProcessDispatchRsp(pTask, pRsp, pMsg->code); streamMetaReleaseTask(pTq->pStreamMeta, pTask); - return 0; + return TSDB_CODE_SUCCESS; } else { tqDebug("vgId:%d failed to handle the dispatch rsp, since find task:0x%x failed", vgId, taskId); return TSDB_CODE_INVALID_MSG; diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 94e005b790..06861454d1 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -758,10 +758,10 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i ASSERT(pTask->info.fillHistory == 1); code = streamTransferStateToStreamTask(pTask); - if (code != TSDB_CODE_SUCCESS) { - atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); - return code; + if (code != TSDB_CODE_SUCCESS) { // todo: do nothing if error happens +// atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); 
} + return TSDB_CODE_SUCCESS; } pTask->msgInfo.retryCount = 0; diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 3b954793de..a3ff752bc5 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -292,9 +292,20 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId); if (pStreamTask == NULL) { - // todo: destroy the fill-history task here - qError("s-task:%s failed to find related stream task:0x%x, it may have been destroyed or closed", pTask->id.idStr, - pTask->streamTaskId.taskId); + qError( + "s-task:%s failed to find related stream task:0x%x, it may have been destroyed or closed, destroy the related " + "fill-history task", + pTask->id.idStr, pTask->streamTaskId.taskId); + + // 1. free it and remove fill-history task from disk meta-store + streamMetaUnregisterTask(pMeta, pTask->id.streamId, pTask->id.taskId); + + // 2. save to disk + taosWLockLatch(&pMeta->lock); + if (streamMetaCommit(pMeta) < 0) { + // persist to disk + } + taosWUnLockLatch(&pMeta->lock); return TSDB_CODE_STREAM_TASK_NOT_EXIST; } else { qDebug("s-task:%s fill-history task end, update related stream task:%s info, transfer exec state", pTask->id.idStr, @@ -334,9 +345,6 @@ int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { qDebug("s-task:%s no need to update time window for non-source task", pStreamTask->id.idStr); } - // todo check the output queue for fill-history task, and wait for it complete - - // 1. 
expand the query time window for stream task of WAL scanner pTimeWindow->skey = INT64_MIN; qStreamInfoResetTimewindowFilter(pStreamTask->exec.pExecutor); @@ -390,15 +398,10 @@ int32_t streamTransferStateToStreamTask(SStreamTask* pTask) { int32_t level = pTask->info.taskLevel; if (level == TASK_LEVEL__SOURCE) { streamTaskFillHistoryFinished(pTask); + } + + if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SOURCE) { // do transfer task operator states. code = streamDoTransferStateToStreamTask(pTask); - if (code != TSDB_CODE_SUCCESS) { // todo handle this - return code; - } - } else if (level == TASK_LEVEL__AGG) { // do transfer task operator states. - code = streamDoTransferStateToStreamTask(pTask); - if (code != TSDB_CODE_SUCCESS) { // todo handle this - return code; - } } return code; @@ -522,6 +525,7 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock ASSERT(pTask->streamTaskId.taskId != 0 && pTask->info.fillHistory == 1); } + // agg task should dispatch trans-state msg to sink task, to flush all data to sink task. if (level == TASK_LEVEL__AGG || level == TASK_LEVEL__SOURCE) { pBlock->srcVgId = pTask->pMeta->vgId; code = taosWriteQitem(pTask->outputInfo.queue->queue, pBlock); From ac446a3b959965af2c52daaa7ed5715faa787113 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Mon, 14 Aug 2023 11:26:47 +0800 Subject: [PATCH 090/147] Update 08-taos-shell.md --- docs/en/14-reference/08-taos-shell.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/en/14-reference/08-taos-shell.md b/docs/en/14-reference/08-taos-shell.md index e66da7ec17..7e0433a8b2 100644 --- a/docs/en/14-reference/08-taos-shell.md +++ b/docs/en/14-reference/08-taos-shell.md @@ -81,6 +81,14 @@ For example: taos -h h1.taos.com -s "use db; show tables;" ``` +## Export query results to a file + +- You can use ">>" to export the query results to a file, the syntax is like `select * from table >> file`. 
If there is only file name without path, the file will be generated under the current working directory of TDegnine CLI. + +## Import data from CSV file + +- You can use `insert into table_name file 'fileName'` to import the data from the specified file into the specified table. For example, `insert into d0 file '/root/d0.csv';` means importing the data in file "/root/d0.csv" into table "d0". If there is only file name without path, that means the file is located under current working directory of TDengine CLI. + ## TDengine CLI tips - You can use the up and down keys to iterate the history of commands entered @@ -89,3 +97,5 @@ taos -h h1.taos.com -s "use db; show tables;" - Execute `RESET QUERY CACHE` to clear the local cache of the table schema - Execute SQL statements in batches. You can store a series of shell commands (ending with ;, one line for each SQL command) in a script file and execute the command `source ` in the TDengine CLI to execute all SQL commands in that file automatically - Enter `q` to exit TDengine CLI + + From e8c9a019a4938671fbc0a77fe4e523139c987dd8 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 14 Aug 2023 11:41:24 +0800 Subject: [PATCH 091/147] fix(stream): remove the invalid set of scheduler status. 
--- include/libs/stream/tstream.h | 1 - source/libs/stream/src/streamExec.c | 10 ---------- source/libs/stream/src/streamRecover.c | 2 -- 3 files changed, 13 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index d3b670d0ec..02bb65b762 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -593,7 +593,6 @@ int32_t streamTaskOutputResultBlock(SStreamTask* pTask, SStreamDataBlock* pBlock bool streamTaskShouldStop(const SStreamStatus* pStatus); bool streamTaskShouldPause(const SStreamStatus* pStatus); bool streamTaskIsIdle(const SStreamTask* pTask); -int32_t streamTaskEndScanWAL(SStreamTask* pTask); SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId); int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize); diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index a3ff752bc5..37c5808e02 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -646,16 +646,6 @@ bool streamTaskIsIdle(const SStreamTask* pTask) { pTask->status.taskStatus == TASK_STATUS__DROPPING); } -int32_t streamTaskEndScanWAL(SStreamTask* pTask) { - const char* id = pTask->id.idStr; - double el = (taosGetTimestampMs() - pTask->tsInfo.step2Start) / 1000.0; - qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, el); - - // 1. notify all downstream tasks to transfer executor state after handle all history blocks. - appendTranstateIntoInputQ(pTask); - return TSDB_CODE_SUCCESS; -} - int32_t streamTryExec(SStreamTask* pTask) { // this function may be executed by multi-threads, so status check is required. 
int8_t schedStatus = diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 72dae735e1..42ff9b9b4e 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -400,9 +400,7 @@ int32_t appendTranstateIntoInputQ(SStreamTask* pTask) { } pTask->status.appendTranstateBlock = true; - qDebug("s-task:%s set sched-status:%d, prev:%d", pTask->id.idStr, TASK_SCHED_STATUS__INACTIVE, pTask->status.schedStatus); - pTask->status.schedStatus = TASK_SCHED_STATUS__INACTIVE; streamSchedExec(pTask); return TSDB_CODE_SUCCESS; From 5ec6b64aab0f37930dcb9c4c45b1ece6663f664a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 14 Aug 2023 13:12:41 +0800 Subject: [PATCH 092/147] fix(stream): add logs. --- source/dnode/vnode/src/tq/tqRead.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 9b8f1781cb..a875febd09 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -339,8 +339,12 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con void* pBody = POINTER_SHIFT(pReader->pHead->head.body, sizeof(SMsgHead)); int32_t len = pReader->pHead->head.bodyLen - sizeof(SMsgHead); - extractDelDataBlock(pBody, len, ver, (SStreamRefDataBlock**)pItem); - tqDebug("s-task:%s delete msg extract from WAL, len:%d, ver:%"PRId64, id, len, ver); + code = extractDelDataBlock(pBody, len, ver, (SStreamRefDataBlock**)pItem); + if (code != TSDB_CODE_SUCCESS) { + tqError("s-task:%s extract delete msg from WAL failed, code:%s", id, tstrerror(code)); + } else { + tqDebug("s-task:%s delete msg extract from WAL, len:%d, ver:%"PRId64, id, len, ver); + } } else { ASSERT(0); } From 3f90ca4b38721b4890f00798d2f694071b98b0af Mon Sep 17 00:00:00 2001 From: Alex Duan <417921451@qq.com> Date: Mon, 14 Aug 2023 14:19:24 +0800 Subject: [PATCH 093/147] fix: set max_binary_display_width 
can not show all text --- tests/system-test/0-others/walRetention.py | 3 +-- tools/shell/src/shellEngine.c | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/system-test/0-others/walRetention.py b/tests/system-test/0-others/walRetention.py index 2b340b7969..5257b7644a 100644 --- a/tests/system-test/0-others/walRetention.py +++ b/tests/system-test/0-others/walRetention.py @@ -460,8 +460,7 @@ class TDTestCase: #self.test_db("db2", 5, 10*24*3600, 2*1024) # 2M size # period + size - self.test_db("db", checkTime = 5*60, wal_period = 60, wal_size_kb=10) - #self.test_db("db", checkTime = 3*60, wal_period = 0, wal_size_kb=0) + self.test_db("db", checkTime = 3*60, wal_period = 60, wal_size_kb=500) def stop(self): diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index e9dd067ac4..0148ebc3c5 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -776,7 +776,7 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision) { if (field->bytes > shell.args.displayWidth) { return TMAX(shell.args.displayWidth, width); } else { - return TMAX(field->bytes, width); + return TMAX(field->bytes + 2, width); } case TSDB_DATA_TYPE_NCHAR: @@ -785,7 +785,7 @@ int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision) { if (bytes > shell.args.displayWidth) { return TMAX(shell.args.displayWidth, width); } else { - return TMAX(bytes, width); + return TMAX(bytes + 2, width); } } From c97b9249fc4e08667648d28ab577828ff8640dd7 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 14 Aug 2023 14:38:28 +0800 Subject: [PATCH 094/147] cos: only for linux --- cmake/cmake.options | 4 ++ contrib/CMakeLists.txt | 9 +++-- source/common/src/tglobal.c | 2 + source/dnode/vnode/CMakeLists.txt | 56 +++++++++++++++------------ source/dnode/vnode/src/vnd/vnodeCos.c | 14 +++++++ 5 files changed, 57 insertions(+), 28 deletions(-) diff --git a/cmake/cmake.options b/cmake/cmake.options index ea5efcb13a..1d4e9ba515 100644 --- 
a/cmake/cmake.options +++ b/cmake/cmake.options @@ -125,12 +125,16 @@ option( ON ) +IF(${TD_LINUX}) + option( BUILD_WITH_COS "If build with cos" ON ) +ENDIF () + option( BUILD_WITH_SQLITE "If build with sqlite" diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 452192a288..e3e48ac3a1 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -6,6 +6,8 @@ function(cat IN_FILE OUT_FILE) file(APPEND ${OUT_FILE} "${CONTENTS}") endfunction(cat IN_FILE OUT_FILE) +if(${TD_LINUX}) + set(CONTRIB_TMP_FILE3 "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in3") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE3}) @@ -35,6 +37,8 @@ execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . execute_process(COMMAND "${CMAKE_COMMAND}" --build . WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") +endif(${TD_LINUX}) + set(CONTRIB_TMP_FILE "${CMAKE_BINARY_DIR}/deps_tmp_CMakeLists.txt.in") configure_file("${TD_SUPPORT_DIR}/deps_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -388,7 +392,7 @@ endif() # cos if(${BUILD_WITH_COS}) - if(NOT ${TD_WINDOWS}) + if(${TD_LINUX}) set(CMAKE_PREFIX_PATH $ENV{HOME}/.cos-local.1) #ADD_DEFINITIONS(-DMINIXML_LIBRARY=${CMAKE_BINARY_DIR}/build/lib/libxml.a) option(ENABLE_TEST "Enable the tests" OFF) @@ -406,10 +410,9 @@ if(${BUILD_WITH_COS}) ) set(CMAKE_PROJECT_NAME ${ORIG_CMAKE_PROJECT_NAME}) - else() - endif(NOT ${TD_WINDOWS}) + endif(${TD_LINUX}) endif(${BUILD_WITH_COS}) # lucene diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 2447e02698..3595347db3 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -292,7 +292,9 @@ int32_t taosSetS3Cfg(SConfig *pCfg) { } } if (tsS3BucketName[0] != '<' && tsDiskCfgNum > 1) { +#ifdef USE_COS tsS3Enabled = true; +#endif } return 0; diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index a07e38e53b..052b6be37f 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ 
b/source/dnode/vnode/CMakeLists.txt @@ -135,12 +135,6 @@ else() endif() endif() -set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") -find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) -find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) -find_library(MINIXML_LIBRARY mxml) -find_library(CURL_LIBRARY curl) - target_link_libraries( vnode PUBLIC os @@ -160,28 +154,24 @@ target_link_libraries( PUBLIC transport PUBLIC stream PUBLIC index - - # s3 - cos_c_sdk_static - ${APR_UTIL_LIBRARY} - ${APR_LIBRARY} - ${MINIXML_LIBRARY} - ${CURL_LIBRARY} ) -IF (TD_GRANT) - TARGET_LINK_LIBRARIES(vnode PUBLIC grant) -ENDIF () +if(${TD_LINUX}) +set(CMAKE_FIND_LIBRARY_SUFFIXES ".a") +find_library(APR_LIBRARY apr-1 PATHS /usr/local/apr/lib/) +find_library(APR_UTIL_LIBRARY aprutil-1 PATHS /usr/local/apr/lib/) +find_library(MINIXML_LIBRARY mxml) +find_library(CURL_LIBRARY curl) +target_link_libraries( + vnode -target_compile_definitions(vnode PUBLIC -DMETA_REFACT) - -if(${BUILD_WITH_INVERTEDINDEX}) - add_definitions(-DUSE_INVERTED_INDEX) -endif(${BUILD_WITH_INVERTEDINDEX}) - -if(${BUILD_WITH_ROCKSDB}) - add_definitions(-DUSE_ROCKSDB) -endif(${BUILD_WITH_ROCKSDB}) + # s3 + PUBLIC cos_c_sdk_static + PUBLIC ${APR_UTIL_LIBRARY} + PUBLIC ${APR_LIBRARY} + PUBLIC ${MINIXML_LIBRARY} + PUBLIC ${CURL_LIBRARY} +) # s3 FIND_PROGRAM(APR_CONFIG_BIN NAMES apr-config apr-1-config PATHS /usr/bin /usr/local/bin /usr/local/apr/bin/) @@ -199,6 +189,22 @@ target_include_directories( PUBLIC "$ENV{HOME}/.cos-local.1/include" ) +endif(${TD_LINUX}) + +IF (TD_GRANT) + TARGET_LINK_LIBRARIES(vnode PUBLIC grant) +ENDIF () + +target_compile_definitions(vnode PUBLIC -DMETA_REFACT) + +if(${BUILD_WITH_INVERTEDINDEX}) + add_definitions(-DUSE_INVERTED_INDEX) +endif(${BUILD_WITH_INVERTEDINDEX}) + +if(${BUILD_WITH_ROCKSDB}) + add_definitions(-DUSE_ROCKSDB) +endif(${BUILD_WITH_ROCKSDB}) + if(${BUILD_TEST}) add_subdirectory(test) endif(${BUILD_TEST}) diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c 
b/source/dnode/vnode/src/vnd/vnodeCos.c index a40e046972..52d5fc2b1b 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -12,6 +12,7 @@ extern char tsS3AccessKeySecret[]; extern char tsS3BucketName[]; extern char tsS3AppId[]; +#ifdef USE_COS int32_t s3Init() { if (cos_http_io_initialize(NULL, 0) != COSE_OK) { return -1; @@ -294,3 +295,16 @@ long s3Size(const char *object_name) { return size; } + +#else + +int32_t s3Init() { return 0; } +void s3CleanUp() {} +void s3PutObjectFromFile(const char *file, const char *object) {} +void s3DeleteObjects(const char *object_name[], int nobject) {} +bool s3Exists(const char *object_name) { return false; } +bool s3Get(const char *object_name, const char *path) { return false; } +void s3EvictCache(const char *path, long object_size) {} +long s3Size(const char *object_name) { return 0; } + +#endif From 91710b0c0f243e8c8f00a65b637e231d36801f33 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 14 Aug 2023 14:56:17 +0800 Subject: [PATCH 095/147] fix(stream): --- source/dnode/vnode/src/tq/tq.c | 2 ++ source/libs/stream/src/streamRecover.c | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 64715122f2..ddd0c49649 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1274,6 +1274,8 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { pTask->tsInfo.step2Start = taosGetTimestampMs(); qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, 0.0); appendTranstateIntoInputQ(pTask); + atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); + streamSchedExec(pTask); } else { STimeWindow* pWindow = &pTask->dataRange.window; tqDebug("s-task:%s level:%d verRange:%" PRId64 " - %" PRId64 " window:%" PRId64 "-%" PRId64 diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 
42ff9b9b4e..c3d4d4c7ae 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -401,8 +401,6 @@ int32_t appendTranstateIntoInputQ(SStreamTask* pTask) { pTask->status.appendTranstateBlock = true; qDebug("s-task:%s set sched-status:%d, prev:%d", pTask->id.idStr, TASK_SCHED_STATUS__INACTIVE, pTask->status.schedStatus); - streamSchedExec(pTask); - return TSDB_CODE_SUCCESS; } From 989abc2bf6264ea6c2837c3daa44027581405807 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 14 Aug 2023 15:03:17 +0800 Subject: [PATCH 096/147] vnode/cos: move includes into USE_COS --- source/dnode/vnode/src/vnd/vnodeCos.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index 52d5fc2b1b..b28b7ad747 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -2,10 +2,6 @@ #include "vndCos.h" -#include "cos_api.h" -#include "cos_http_io.h" -#include "cos_log.h" - extern char tsS3Endpoint[]; extern char tsS3AccessKeyId[]; extern char tsS3AccessKeySecret[]; @@ -13,6 +9,10 @@ extern char tsS3BucketName[]; extern char tsS3AppId[]; #ifdef USE_COS +#include "cos_api.h" +#include "cos_http_io.h" +#include "cos_log.h" + int32_t s3Init() { if (cos_http_io_initialize(NULL, 0) != COSE_OK) { return -1; From a1e554fbf32b68496f63a5a9c9a0d5fe41d25dfd Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 14 Aug 2023 15:05:13 +0800 Subject: [PATCH 097/147] refactor: exec directly not asynchnoized. 
--- source/dnode/vnode/src/tq/tq.c | 3 +-- source/libs/stream/src/streamExec.c | 36 ++++++----------------------- 2 files changed, 8 insertions(+), 31 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index ddd0c49649..815e9647b5 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1274,8 +1274,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { pTask->tsInfo.step2Start = taosGetTimestampMs(); qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, 0.0); appendTranstateIntoInputQ(pTask); - atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); - streamSchedExec(pTask); + streamTryExec(pTask); // exec directly } else { STimeWindow* pWindow = &pTask->dataRange.window; tqDebug("s-task:%s level:%d verRange:%" PRId64 " - %" PRId64 " window:%" PRId64 "-%" PRId64 diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 37c5808e02..ccfa331661 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -661,36 +661,14 @@ int32_t streamTryExec(SStreamTask* pTask) { } // todo the task should be commit here -// if (taosQueueEmpty(pTask->inputQueue->queue)) { - // fill-history WAL scan has completed -// if (pTask->status.transferState) { -// code = streamTransferStateToStreamTask(pTask); -// if (code != TSDB_CODE_SUCCESS) { -// atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); -// return code; -// } + atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); + qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus), + pTask->status.schedStatus); - // the schedStatus == TASK_SCHED_STATUS__ACTIVE, streamSchedExec cannot be executed, so execute once again by - // call this function (streamExecForAll) directly. 
- // code = streamExecForAll(pTask); - // if (code < 0) { - // do nothing - // } -// } - -// atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); -// qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, -// streamGetTaskStatusStr(pTask->status.taskStatus), pTask->status.schedStatus); -// } else { - atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); - qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus), - pTask->status.schedStatus); - - if (!(taosQueueEmpty(pTask->inputQueue->queue) || streamTaskShouldStop(&pTask->status) || - streamTaskShouldPause(&pTask->status))) { - streamSchedExec(pTask); - } -// } + if (!(taosQueueEmpty(pTask->inputQueue->queue) || streamTaskShouldStop(&pTask->status) || + streamTaskShouldPause(&pTask->status))) { + streamSchedExec(pTask); + } } else { qDebug("s-task:%s already started to exec by other thread, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus), pTask->status.schedStatus); From 26951294cfa76ef77a1b185346a9cf6637982967 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 14 Aug 2023 15:10:18 +0800 Subject: [PATCH 098/147] Update cases.task --- tests/parallel_test/cases.task | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index a946a7feaf..0a65340976 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -456,7 +456,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3 ,,n,system-test,python3 ./test.py -f 6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py -N 6 -M 3 -#,,n,system-test,python ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 +,,n,system-test,python3 ./test.py 
-f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 From 5c276fa547c0b5ba9a7a7332968ecc89f2b51105 Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Fri, 11 Aug 2023 17:15:17 +0800 Subject: [PATCH 099/147] fix: make kill query work for sysscanoperator --- source/libs/executor/inc/querytask.h | 2 +- source/libs/executor/inc/tsort.h | 3 ++- source/libs/executor/src/querytask.c | 2 +- source/libs/executor/src/scanoperator.c | 5 +++-- source/libs/executor/src/sysscanoperator.c | 5 +++++ source/libs/executor/src/tsort.c | 13 ++++++++++--- 6 files changed, 22 insertions(+), 8 deletions(-) diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h index 7241b015a0..0742b9ba4c 100644 --- a/source/libs/executor/inc/querytask.h +++ b/source/libs/executor/inc/querytask.h @@ -99,7 +99,7 @@ struct SExecTaskInfo { void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst); SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model, SStorageAPI* pAPI); void doDestroyTask(SExecTaskInfo* pTaskInfo); -bool isTaskKilled(SExecTaskInfo* pTaskInfo); +bool isTaskKilled(void* pTaskInfo); void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode); void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status); int32_t createExecTaskInfo(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId, diff --git a/source/libs/executor/inc/tsort.h b/source/libs/executor/inc/tsort.h index 57c8bce275..3180173ca7 100644 --- a/source/libs/executor/inc/tsort.h +++ b/source/libs/executor/inc/tsort.h @@ -191,7 +191,8 @@ int32_t getProperSortPageSize(size_t rowSize, uint32_t numOfCols); bool 
tsortIsClosed(SSortHandle* pHandle); void tsortSetClosed(SSortHandle* pHandle); -void setSingleTableMerge(SSortHandle* pHandle); +void tsortSetSingleTableMerge(SSortHandle* pHandle); +void tsortSetAbortCheckFn(SSortHandle* pHandle, bool (*checkFn)(void* param), void* param); #ifdef __cplusplus } diff --git a/source/libs/executor/src/querytask.c b/source/libs/executor/src/querytask.c index 22d171e74a..980ef1a61a 100644 --- a/source/libs/executor/src/querytask.c +++ b/source/libs/executor/src/querytask.c @@ -59,7 +59,7 @@ SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOP return pTaskInfo; } -bool isTaskKilled(SExecTaskInfo* pTaskInfo) { return (0 != pTaskInfo->code); } +bool isTaskKilled(void* pTaskInfo) { return (0 != ((SExecTaskInfo*)pTaskInfo)->code); } void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode) { pTaskInfo->code = rspCode; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 71b0747be8..a3c5a76a7f 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2928,8 +2928,9 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize; pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_BLOCK_TS_MERGE, pInfo->bufPageSize, numOfBufPage, pInfo->pSortInputBlock, pTaskInfo->id.str, 0, 0, 0); - + tsortSetMergeLimit(pInfo->pSortHandle, mergeLimit); + tsortSetAbortCheckFn(pInfo->pSortHandle, isTaskKilled, pOperator->pTaskInfo); } tsortSetFetchRawDataFp(pInfo->pSortHandle, getBlockForTableMergeScan, NULL, NULL); @@ -2949,7 +2950,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) { int32_t code = TSDB_CODE_SUCCESS; if (numOfTable == 1) { - setSingleTableMerge(pInfo->pSortHandle); + tsortSetSingleTableMerge(pInfo->pSortHandle); } else { code = tsortOpen(pInfo->pSortHandle); } diff --git a/source/libs/executor/src/sysscanoperator.c 
b/source/libs/executor/src/sysscanoperator.c index a1f83dda2f..9048dd43d7 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -1601,6 +1601,11 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { SSysTableScanInfo* pInfo = pOperator->info; char dbName[TSDB_DB_NAME_LEN] = {0}; + if (isTaskKilled(pOperator->pTaskInfo)) { + setOperatorCompleted(pOperator); + return NULL; + } + blockDataCleanup(pInfo->pRes); const char* name = tNameGetTableName(&pInfo->name); diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 1891e93c61..6c4a780dfb 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -71,12 +71,20 @@ struct SSortHandle { SMultiwayMergeTreeInfo* pMergeTree; bool singleTableMerge; + + bool (*abortCheckFn)(void* param); + void* abortCheckParam; }; -void setSingleTableMerge(SSortHandle* pHandle) { +void tsortSetSingleTableMerge(SSortHandle* pHandle) { pHandle->singleTableMerge = true; } +void tsortSetAbortCheckFn(SSortHandle *pHandle, bool (*checkFn)(void *), void* param) { + pHandle->abortCheckFn = checkFn; + pHandle->abortCheckParam = param; +} + static int32_t msortComparFn(const void* pLeft, const void* pRight, void* param); // | offset[0] | offset[1] |....| nullbitmap | data |...| @@ -726,11 +734,10 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) { SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t)); while (1) { - if (tsortIsClosed(pHandle)) { + if (tsortIsClosed(pHandle) || (pHandle->abortCheckFn && pHandle->abortCheckFn(pHandle->abortCheckParam))) { code = terrno = TSDB_CODE_TSC_QUERY_CANCELLED; return code; } - SSDataBlock* pDataBlock = getSortedBlockDataInner(pHandle, &pHandle->cmpParam, numOfRows); if (pDataBlock == NULL) { break; From 57d1957dee1d8a738dfe0f2fe3efd9716433a280 Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 14 Aug 2023 15:57:27 +0800 Subject: [PATCH 100/147] enhance: tag scan code refactoring 
--- source/libs/executor/inc/executorInt.h | 2 + source/libs/executor/src/scanoperator.c | 110 +++++++++--------------- 2 files changed, 43 insertions(+), 69 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 2b25feabb3..cb066d809c 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -263,6 +263,8 @@ typedef struct STagScanInfo { void* pCtbCursor; SNode* pTagCond; SNode* pTagIndexCond; + SArray* aUidTags; // SArray + SArray* aFilterIdxs; // SArray SStorageAPI* pStorageAPI; } STagScanInfo; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ac20bae167..71352b1c6e 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2813,7 +2813,7 @@ static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo // } else { // name is not retrieved during filter // pAPI->metaFn.getTableNameByUid(pVnode, pUidTagInfo->uid, str); // } - STR_TO_VARSTR(str, "zsl"); + STR_TO_VARSTR(str, "ctbidx"); colDataSetVal(pColInfo, rowIndex, str, false); } else { @@ -2841,66 +2841,32 @@ static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo } } -static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aFilterIdxs, +static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, SArray* aFilterIdxs, bool ignoreFilterIdx, SStorageAPI* pAPI) { STagScanInfo* pInfo = pOperator->info; SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; - - size_t szTables = taosArrayGetSize(aFilterIdxs); - for (int i = 0; i < szTables; ++i) { - int32_t idx = *(int32_t*)taosArrayGet(aFilterIdxs, i); - STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, idx); - for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { - SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, 
pExprInfo[j].base.resSchema.slotId); - tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); + if (!ignoreFilterIdx) { + size_t szTables = taosArrayGetSize(aFilterIdxs); + for (int i = 0; i < szTables; ++i) { + int32_t idx = *(int32_t*)taosArrayGet(aFilterIdxs, i); + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, idx); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); + } } - } - return 0; -} - -#if 0 -static int32_t tagScanFillResultBlock(SOperatorInfo* pOperator, SSDataBlock* pRes, SArray* aUidTags, - SStorageAPI* pAPI) { - STagScanInfo* pInfo = pOperator->info; - SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0]; - - int32_t nTbls = taosArrayGetSize(aUidTags); - for (int i = 0; i < nTbls; ++i) { - STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, i); - for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { - SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); - - // refactor later - if (fmIsScanPseudoColumnFunc(pExprInfo[j].pExpr->_function.functionId)) { - char str[512]; - - STR_TO_VARSTR(str, "zsl"); - colDataSetVal(pDst, (i), str, false); - } else { // it is a tag value - STagVal val = {0}; - val.cid = pExprInfo[j].base.pParam[0].pCol->colId; - const char* p = pAPI->metaFn.extractTagVal(pUidTagInfo->pTagVal, pDst->info.type, &val); - - char* data = NULL; - if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) { - data = tTagValToData((const STagVal*)p, false); - } else { - data = (char*)p; - } - colDataSetVal(pDst, i, data, - (data == NULL) || (pDst->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data))); - - if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL && IS_VAR_DATA_TYPE(((const STagVal*)p)->type) && - data != NULL) { - 
taosMemoryFree(data); - } + } else { + size_t szTables = taosArrayGetSize(aUidTags); + for (int i = 0; i < szTables; ++i) { + STUidTagInfo* pUidTagInfo = taosArrayGet(aUidTags, i); + for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) { + SColumnInfoData* pDst = taosArrayGet(pRes->pDataBlock, pExprInfo[j].base.resSchema.slotId); + tagScanFillOneCellWithTag(pUidTagInfo, &pExprInfo[j], pDst, i, pAPI, pInfo->readHandle.vnode); } } } return 0; } -#endif - static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { @@ -2912,16 +2878,19 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { STagScanInfo* pInfo = pOperator->info; SSDataBlock* pRes = pInfo->pRes; blockDataCleanup(pRes); - int32_t count = 0; if (pInfo->pCtbCursor == NULL) { pInfo->pCtbCursor = pAPI->metaFn.openCtbCursor(pInfo->readHandle.vnode, pInfo->suid, 1); } - SArray* aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); - SArray* aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + SArray* aUidTags = pInfo->aUidTags; + SArray* aFilterIdxs = pInfo->aFilterIdxs; + int32_t count = 0; while (1) { + taosArrayClearEx(aUidTags, tagScanFreeUidTag); + taosArrayClear(aFilterIdxs); + int32_t numTables = 0; while (numTables < pOperator->resultInfo.capacity) { SMCtbCursor* pCur = pInfo->pCtbCursor; @@ -2939,34 +2908,29 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { if (numTables == 0) { break; } + bool ignoreFilterIdx = true; if (pInfo->pTagCond != NULL) { + ignoreFilterIdx = false; tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI); } else { - for (int i = 0; i < numTables; ++i) { - taosArrayPush(aFilterIdxs, &i); - } + ignoreFilterIdx = true; } - tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, pAPI); - count = taosArrayGetSize(aFilterIdxs); + tagScanFillResultBlock(pOperator, pRes, aUidTags, aFilterIdxs, 
ignoreFilterIdx, pAPI); + + count = ignoreFilterIdx ? taosArrayGetSize(aUidTags): taosArrayGetSize(aFilterIdxs); if (count != 0) { break; } - - taosArrayClearEx(aUidTags, tagScanFreeUidTag); - taosArrayClear(aFilterIdxs); } - - taosArrayDestroy(aFilterIdxs); - taosArrayDestroyEx(aUidTags, tagScanFreeUidTag); - + pRes->info.rows = count; pOperator->resultInfo.totalRows += count; return (pRes->info.rows == 0) ? NULL : pInfo->pRes; } -static SSDataBlock* doTagScan(SOperatorInfo* pOperator) { +static SSDataBlock* doTagScanFromMetaEntry(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; } @@ -3027,6 +2991,10 @@ static void destroyTagScanOperatorInfo(void* param) { if (pInfo->pCtbCursor != NULL) { pInfo->pStorageAPI->metaFn.closeCtbCursor(pInfo->pCtbCursor, 1); } + + taosArrayDestroy(pInfo->aFilterIdxs); + taosArrayDestroyEx(pInfo->aUidTags, tagScanFreeUidTag); + pInfo->pRes = blockDataDestroy(pInfo->pRes); taosArrayDestroy(pInfo->matchInfo.pList); pInfo->pTableListInfo = tableListDestroy(pInfo->pTableListInfo); @@ -3072,7 +3040,11 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi initResultSizeInfo(&pOperator->resultInfo, 4096); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); - __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? doTagScanFromCtbIdx : doTagScan; + if (pPhyNode->onlyMetaCtbIdx) { + pInfo->aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); + pInfo->aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + } + __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? 
doTagScanFromCtbIdx : doTagScanFromMetaEntry; pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, tagScanNextFn, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); From cf9f9ab4718c8420c72ea5767e097ba3c0b1c19c Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 14 Aug 2023 08:01:20 +0000 Subject: [PATCH 101/147] rebuild index at tag0 --- source/dnode/mnode/impl/src/mndIndex.c | 70 ++++++-- source/dnode/vnode/src/meta/metaQuery.c | 14 +- source/dnode/vnode/src/meta/metaTable.c | 9 +- source/libs/parser/src/parTranslater.c | 229 +++++++++++++----------- 4 files changed, 191 insertions(+), 131 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndIndex.c b/source/dnode/mnode/impl/src/mndIndex.c index 2157804559..b56ea320cc 100644 --- a/source/dnode/mnode/impl/src/mndIndex.c +++ b/source/dnode/mnode/impl/src/mndIndex.c @@ -79,9 +79,12 @@ int32_t mndInitIdx(SMnode *pMnode) { return sdbSetTable(pMnode->pSdb, table); } -static int32_t mndFindSuperTableTagId(const SStbObj *pStb, const char *tagName) { +static int32_t mndFindSuperTableTagId(const SStbObj *pStb, const char *tagName, int8_t *hasIdx) { for (int32_t tag = 0; tag < pStb->numOfTags; tag++) { if (strcasecmp(pStb->pTags[tag].name, tagName) == 0) { + if (IS_IDX_ON(&pStb->pTags[tag])) { + *hasIdx = 1; + } return tag; } } @@ -597,7 +600,8 @@ static int32_t mndSetUpdateIdxStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb pNew->updateTime = taosGetTimestampMs(); pNew->lock = 0; - int32_t tag = mndFindSuperTableTagId(pOld, tagName); + int8_t hasIdx = 0; + int32_t tag = mndFindSuperTableTagId(pOld, tagName, &hasIdx); if (tag < 0) { terrno = TSDB_CODE_MND_TAG_NOT_EXIST; return -1; @@ -612,14 +616,14 @@ static int32_t mndSetUpdateIdxStbCommitLogs(SMnode *pMnode, STrans *pTrans, SStb SSchema *pTag = pNew->pTags + tag; if (on == 1) { - if (IS_IDX_ON(pTag)) { + if (hasIdx && tag != 0) { terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; return -1; } else { SSCHMEA_SET_IDX_ON(pTag); } } else { - if 
(!IS_IDX_ON(pTag)) { + if (hasIdx == 0) { terrno = TSDB_CODE_MND_SMA_NOT_EXIST; } else { SSCHMEA_SET_IDX_OFF(pTag); @@ -667,7 +671,42 @@ _OVER: mndTransDrop(pTrans); return code; } +int8_t mndCheckIndexNameByTagName(SMnode *pMnode, SIdxObj *pIdxObj) { + // build index on first tag, and no index name; + int8_t exist = 0; + SDbObj *pDb = NULL; + if (strlen(pIdxObj->db) > 0) { + pDb = mndAcquireDb(pMnode, pIdxObj->db); + if (pDb == NULL) return 0; + } + SSmaAndTagIter *pIter = NULL; + SIdxObj *pIdx = NULL; + SSdb *pSdb = pMnode->pSdb; + while (1) { + pIter = sdbFetch(pSdb, SDB_IDX, pIter, (void **)&pIdx); + if (pIter == NULL) break; + + if (NULL != pDb && pIdx->dbUid != pDb->uid) { + sdbRelease(pSdb, pIdx); + continue; + } + if (pIdxObj->stbUid != pIdx->stbUid) { + sdbRelease(pSdb, pIdx); + continue; + } + if (strncmp(pIdxObj->colName, pIdx->colName, TSDB_COL_NAME_LEN) == 0) { + sdbRelease(pSdb, pIdx); + sdbCancelFetch(pSdb, pIdx); + exist = 1; + break; + } + sdbRelease(pSdb, pIdx); + } + + mndReleaseDb(pMnode, pDb); + return exist; +} static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *req, SDbObj *pDb, SStbObj *pStb) { int32_t code = -1; SIdxObj idxObj = {0}; @@ -681,11 +720,20 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *re idxObj.stbUid = pStb->uid; idxObj.dbUid = pStb->dbUid; - int32_t tag = mndFindSuperTableTagId(pStb, req->colName); + int8_t hasIdx = 0; + int32_t tag = mndFindSuperTableTagId(pStb, req->colName, &hasIdx); if (tag < 0) { terrno = TSDB_CODE_MND_TAG_NOT_EXIST; return -1; - } else if (tag == 0) { + } + int8_t exist = 0; + if (tag == 0 && hasIdx == 1) { + exist = mndCheckIndexNameByTagName(pMnode, &idxObj); + if (exist) { + terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; + return -1; + } + } else if (hasIdx == 1) { terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; return -1; } @@ -695,11 +743,11 @@ static int32_t mndAddIndex(SMnode *pMnode, SRpcMsg *pReq, SCreateTagIndexReq *re return -1; } 
- SSchema *pTag = pStb->pTags + tag; - if (IS_IDX_ON(pTag)) { - terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; - return -1; - } + // SSchema *pTag = pStb->pTags + tag; + // if (IS_IDX_ON(pTag)) { + // terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; + // return -1; + // } code = mndAddIndexImpl(pMnode, pReq, pDb, pStb, &idxObj); return code; diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index c26bb45c2b..389994ce1d 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -17,8 +17,8 @@ #include "osMemory.h" #include "tencode.h" -void _metaReaderInit(SMetaReader* pReader, void* pVnode, int32_t flags, SStoreMeta* pAPI) { - SMeta* pMeta = ((SVnode*)pVnode)->pMeta; +void _metaReaderInit(SMetaReader *pReader, void *pVnode, int32_t flags, SStoreMeta *pAPI) { + SMeta *pMeta = ((SVnode *)pVnode)->pMeta; metaReaderDoInit(pReader, pMeta, flags); pReader->pAPI = pAPI; } @@ -143,7 +143,7 @@ tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) { int metaGetTableNameByUid(void *pVnode, uint64_t uid, char *tbName) { int code = 0; SMetaReader mr = {0}; - metaReaderDoInit(&mr, ((SVnode*)pVnode)->pMeta, 0); + metaReaderDoInit(&mr, ((SVnode *)pVnode)->pMeta, 0); code = metaReaderGetTableEntryByUid(&mr, uid); if (code < 0) { metaReaderClear(&mr); @@ -195,7 +195,7 @@ int metaGetTableUidByName(void *pVnode, char *tbName, uint64_t *uid) { int metaGetTableTypeByName(void *pVnode, char *tbName, ETableType *tbType) { int code = 0; SMetaReader mr = {0}; - metaReaderDoInit(&mr, ((SVnode*)pVnode)->pMeta, 0); + metaReaderDoInit(&mr, ((SVnode *)pVnode)->pMeta, 0); code = metaGetTableEntryByName(&mr, tbName); if (code == 0) *tbType = mr.me.type; @@ -244,7 +244,7 @@ SMTbCursor *metaOpenTbCursor(void *pVnode) { return NULL; } - SVnode* pVnodeObj = pVnode; + SVnode *pVnodeObj = pVnode; // tdbTbcMoveToFirst((TBC *)pTbCur->pDbc); pTbCur->pMeta = pVnodeObj->pMeta; pTbCur->paused = 1; @@ -1139,7 
+1139,7 @@ int32_t metaFilterTtl(void *pVnode, SMetaFltParam *arg, SArray *pUids) { pCursor->type = param->type; metaRLock(pMeta); - //ret = tdbTbcOpen(pMeta->pTtlIdx, &pCursor->pCur, NULL); + // ret = tdbTbcOpen(pMeta->pTtlIdx, &pCursor->pCur, NULL); END: if (pCursor->pMeta) metaULock(pCursor->pMeta); @@ -1194,7 +1194,7 @@ int32_t metaFilterTableIds(void *pVnode, SMetaFltParam *arg, SArray *pUids) { ret = -1; for (int i = 0; i < oStbEntry.stbEntry.schemaTag.nCols; i++) { SSchema *schema = oStbEntry.stbEntry.schemaTag.pSchema + i; - if (schema->colId == param->cid && param->type == schema->type && (IS_IDX_ON(schema) || i == 0)) { + if (schema->colId == param->cid && param->type == schema->type && (IS_IDX_ON(schema))) { ret = 0; } } diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 632e6dd872..f56837f759 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -450,12 +450,13 @@ int metaAddIndexToSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) { goto _err; } if (IS_IDX_ON(pNew) && !IS_IDX_ON(pOld)) { - if (diffIdx != -1) goto _err; + // if (diffIdx != -1) goto _err; diffIdx = i; + break; } } - if (diffIdx == -1 || diffIdx == 0) { + if (diffIdx == -1) { goto _err; } @@ -586,7 +587,7 @@ int metaDropIndexFromSTable(SMeta *pMeta, int64_t version, SDropIndexReq *pReq) for (int i = 0; i < oStbEntry.stbEntry.schemaTag.nCols; i++) { SSchema *schema = oStbEntry.stbEntry.schemaTag.pSchema + i; if (0 == strncmp(schema->name, pReq->colName, sizeof(pReq->colName))) { - if (i != 0 || IS_IDX_ON(schema)) { + if (IS_IDX_ON(schema)) { pCol = schema; } break; @@ -2094,7 +2095,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { } else { for (int i = 0; i < pTagSchema->nCols; i++) { pTagColumn = &pTagSchema->pSchema[i]; - if (i != 0 && !IS_IDX_ON(pTagColumn)) continue; + if (!IS_IDX_ON(pTagColumn)) continue; STagVal tagVal = {.cid = pTagColumn->colId}; 
tTagGet((const STag *)pCtbEntry->ctbEntry.pTags, &tagVal); diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 38118c03f8..d2dc1f3320 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -31,8 +31,8 @@ #define SYSTABLE_SHOW_TYPE_OFFSET QUERY_NODE_SHOW_DNODES_STMT typedef struct SRewriteTbNameContext { - int32_t errCode; - char* pTbName; + int32_t errCode; + char* pTbName; } SRewriteTbNameContext; typedef struct STranslateContext { @@ -54,7 +54,7 @@ typedef struct STranslateContext { bool stableQuery; bool showRewrite; SNode* pPrevRoot; - SNode* pPostRoot; + SNode* pPostRoot; } STranslateContext; typedef struct SBuildTopicContext { @@ -278,10 +278,11 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = { static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode); static int32_t translateQuery(STranslateContext* pCxt, SNode* pNode); static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal); -static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt); -static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery); -static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery); -static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery); +static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt); +static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta, SNode** pQuery); +static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery); +static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery); static bool afterGroupBy(ESqlClause clause) { return clause > SQL_CLAUSE_GROUP_BY; } @@ -772,7 +773,8 @@ static SNodeList* getProjectList(const SNode* pNode) { static bool isTimeLineQuery(SNode* pStmt) { if 
(QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { - return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode); + return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || + (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode); } else if (QUERY_NODE_SET_OPERATOR == nodeType(pStmt)) { return TIME_LINE_GLOBAL == ((SSetOperator*)pStmt)->timeLineResMode; } else { @@ -791,7 +793,7 @@ static bool isGlobalTimeLineQuery(SNode* pStmt) { } static bool isTimeLineAlignedQuery(SNode* pStmt) { - SSelectStmt *pSelect = (SSelectStmt *)pStmt; + SSelectStmt* pSelect = (SSelectStmt*)pStmt; if (isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return true; } @@ -801,7 +803,7 @@ static bool isTimeLineAlignedQuery(SNode* pStmt) { if (QUERY_NODE_SELECT_STMT != nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { return false; } - SSelectStmt *pSub = (SSelectStmt *)((STempTableNode*)pSelect->pFromTable)->pSubquery; + SSelectStmt* pSub = (SSelectStmt*)((STempTableNode*)pSelect->pFromTable)->pSubquery; if (nodesListMatch(pSelect->pPartitionByList, pSub->pPartitionByList)) { return true; } @@ -822,18 +824,18 @@ static bool isPrimaryKeyImpl(SNode* pExpr) { return true; } } else if (QUERY_NODE_OPERATOR == nodeType(pExpr)) { - SOperatorNode* pOper = (SOperatorNode*)pExpr; - if (OP_TYPE_ADD != pOper->opType && OP_TYPE_SUB != pOper->opType) { - return false; - } - if (!isPrimaryKeyImpl(pOper->pLeft)) { - return false; - } - if (QUERY_NODE_VALUE != nodeType(pOper->pRight)) { - return false; - } - return true; + SOperatorNode* pOper = (SOperatorNode*)pExpr; + if (OP_TYPE_ADD != pOper->opType && OP_TYPE_SUB != pOper->opType) { + return false; } + if (!isPrimaryKeyImpl(pOper->pLeft)) { + return false; + } + if (QUERY_NODE_VALUE != nodeType(pOper->pRight)) { + return false; + } + return true; + } return false; } @@ -860,7 +862,7 @@ static void setColumnInfoBySchema(const 
SRealTableNode* pTable, const SSchema* p pCol->tableType = pTable->pMeta->tableType; pCol->colId = pColSchema->colId; pCol->colType = (tagFlag >= 0 ? COLUMN_TYPE_TAG : COLUMN_TYPE_COLUMN); - pCol->hasIndex = ((0 == tagFlag) || (pColSchema != NULL && IS_IDX_ON(pColSchema))); + pCol->hasIndex = (pColSchema != NULL && IS_IDX_ON(pColSchema)); pCol->node.resType.type = pColSchema->type; pCol->node.resType.bytes = pColSchema->bytes; if (TSDB_DATA_TYPE_TIMESTAMP == pCol->node.resType.type) { @@ -1406,7 +1408,7 @@ static bool isCountStar(SFunctionNode* pFunc) { } static int32_t rewriteCountStarAsCount1(STranslateContext* pCxt, SFunctionNode* pCount) { - int32_t code = TSDB_CODE_SUCCESS; + int32_t code = TSDB_CODE_SUCCESS; SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); if (NULL == pVal) { return TSDB_CODE_OUT_OF_MEMORY; @@ -1608,9 +1610,11 @@ static int32_t translateInterpFunc(STranslateContext* pCxt, SFunctionNode* pFunc return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } - if (pSelect->hasInterpFunc && (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { + if (pSelect->hasInterpFunc && + (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc))) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, - "%s ignoring null value options cannot be used when applying to multiple columns", pFunc->functionName); + "%s ignoring null value options cannot be used when applying to multiple columns", + pFunc->functionName); } if (NULL != pSelect->pWindow || NULL != pSelect->pGroupByList) { @@ -1648,7 +1652,8 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu } SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && - !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && 
!isTimeLineAlignedQuery(pCxt->pCurrStmt)) { + !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && + !isTimeLineAlignedQuery(pCxt->pCurrStmt)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, "%s function requires valid time series input", pFunc->functionName); } @@ -1718,8 +1723,8 @@ static int32_t translateForbidSysTableFunc(STranslateContext* pCxt, SFunctionNod return TSDB_CODE_SUCCESS; } - SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; - SNode* pTable = pSelect->pFromTable; + SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; + SNode* pTable = pSelect->pFromTable; if (NULL != pTable && QUERY_NODE_REAL_TABLE == nodeType(pTable) && TSDB_SYSTEM_TABLE == ((SRealTableNode*)pTable)->pMeta->tableType) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, pFunc->functionName); @@ -2308,7 +2313,8 @@ static EDealRes doCheckExprForGroupBy(SNode** pNode, void* pContext) { } } if (isScanPseudoColumnFunc(*pNode) || QUERY_NODE_COLUMN == nodeType(*pNode)) { - if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { + if (pSelect->selectFuncNum > 1 || pSelect->hasOtherVectorFunc || !pSelect->hasSelectFunc || + (isDistinctOrderBy(pCxt) && pCxt->currClause == SQL_CLAUSE_ORDER_BY)) { return generateDealNodeErrMsg(pCxt, getGroupByErrorCode(pCxt), ((SExprNode*)(*pNode))->userAlias); } else { return rewriteColToSelectValFunc(pCxt, pNode); @@ -2403,14 +2409,14 @@ static int32_t checkHavingGroupBy(STranslateContext* pCxt, SSelectStmt* pSelect) if (NULL != pSelect->pHaving) { code = checkExprForGroupBy(pCxt, &pSelect->pHaving); } -/* - if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pProjectionList) { - code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); - } - if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { - code = checkExprListForGroupBy(pCxt, 
pSelect, pSelect->pOrderByList); - } -*/ + /* + if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pProjectionList) { + code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); + } + if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pOrderByList) { + code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pOrderByList); + } + */ return code; } @@ -2669,10 +2675,10 @@ static int32_t setTableCacheLastMode(STranslateContext* pCxt, SSelectStmt* pSele static EDealRes doTranslateTbName(SNode** pNode, void* pContext) { switch (nodeType(*pNode)) { case QUERY_NODE_FUNCTION: { - SFunctionNode *pFunc = (SFunctionNode *)*pNode; + SFunctionNode* pFunc = (SFunctionNode*)*pNode; if (FUNCTION_TYPE_TBNAME == pFunc->funcType) { - SRewriteTbNameContext *pCxt = (SRewriteTbNameContext*)pContext; - SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); + SRewriteTbNameContext* pCxt = (SRewriteTbNameContext*)pContext; + SValueNode* pVal = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE); if (NULL == pVal) { pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; return DEAL_RES_ERROR; @@ -2711,11 +2717,12 @@ static int32_t replaceTbName(STranslateContext* pCxt, SSelectStmt* pSelect) { } SRealTableNode* pTable = (SRealTableNode*)pSelect->pFromTable; - if (TSDB_CHILD_TABLE != pTable->pMeta->tableType && TSDB_NORMAL_TABLE != pTable->pMeta->tableType && TSDB_SYSTEM_TABLE != pTable->pMeta->tableType) { + if (TSDB_CHILD_TABLE != pTable->pMeta->tableType && TSDB_NORMAL_TABLE != pTable->pMeta->tableType && + TSDB_SYSTEM_TABLE != pTable->pMeta->tableType) { return TSDB_CODE_SUCCESS; } - SNode** pNode = NULL; + SNode** pNode = NULL; SRewriteTbNameContext pRewriteCxt = {0}; pRewriteCxt.pTbName = pTable->table.tableName; @@ -3122,7 +3129,8 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList code = scalarCalculateConstants(pCastFunc, &pCell->pNode); } if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) { - code = 
generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant"); + code = + generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant"); } else if (TSDB_CODE_SUCCESS != code) { code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch"); } @@ -3588,7 +3596,6 @@ static int32_t createDefaultEveryNode(STranslateContext* pCxt, SNode** pOutput) pEvery->isDuration = true; pEvery->literal = taosStrdup("1s"); - *pOutput = (SNode*)pEvery; return TSDB_CODE_SUCCESS; } @@ -3683,15 +3690,15 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) { static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelect) { pCxt->currClause = SQL_CLAUSE_PARTITION_BY; int32_t code = TSDB_CODE_SUCCESS; - + if (pSelect->pPartitionByList) { int8_t typeType = getTableTypeFromTableNode(pSelect->pFromTable); SNode* pPar = nodesListGetNode(pSelect->pPartitionByList, 0); - if (!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) && - 1 == pSelect->pPartitionByList->length && (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { + if (!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) && 1 == pSelect->pPartitionByList->length && + (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { pSelect->timeLineResMode = TIME_LINE_MULTI; } - + code = translateExprList(pCxt, pSelect->pPartitionByList); } if (TSDB_CODE_SUCCESS == code) { @@ -3955,9 +3962,9 @@ static int32_t translateSetOperProject(STranslateContext* pCxt, SSetOperator* pS } snprintf(pRightExpr->aliasName, sizeof(pRightExpr->aliasName), "%s", pLeftExpr->aliasName); SNode* pProj = createSetOperProject(pSetOperator->stmtName, pLeft); - if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight) - && 
((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID - && ((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { + if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight) && + ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID && + ((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) { ((SColumnNode*)pProj)->colId = PRIMARYKEY_TIMESTAMP_COL_ID; } if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pSetOperator->pProjectionList, pProj)) { @@ -5737,7 +5744,6 @@ static int32_t translateRestoreDnode(STranslateContext* pCxt, SRestoreComponentN return buildCmdMsg(pCxt, TDMT_MND_RESTORE_DNODE, (FSerializeFunc)tSerializeSRestoreDnodeReq, &restoreReq); } - static int32_t getSmaIndexDstVgId(STranslateContext* pCxt, const char* pDbName, const char* pTableName, int32_t* pVgId) { SVgroupInfo vg = {0}; @@ -5865,7 +5871,7 @@ static int32_t checkCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pS } static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) { - int32_t code = checkCreateSmaIndex(pCxt, pStmt); + int32_t code = checkCreateSmaIndex(pCxt, pStmt); pStmt->pReq = taosMemoryCalloc(1, sizeof(SMCreateSmaReq)); if (pStmt->pReq == NULL) code = TSDB_CODE_OUT_OF_MEMORY; if (TSDB_CODE_SUCCESS == code) { @@ -5879,13 +5885,15 @@ int32_t createIntervalFromCreateSmaIndexStmt(SCreateIndexStmt* pStmt, SInterval* pInterval->interval = ((SValueNode*)pStmt->pOptions->pInterval)->datum.i; pInterval->intervalUnit = ((SValueNode*)pStmt->pOptions->pInterval)->unit; pInterval->offset = NULL != pStmt->pOptions->pOffset ? ((SValueNode*)pStmt->pOptions->pOffset)->datum.i : 0; - pInterval->sliding = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval; - pInterval->slidingUnit = NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit; + pInterval->sliding = + NULL != pStmt->pOptions->pSliding ? 
((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pInterval->interval; + pInterval->slidingUnit = + NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pInterval->intervalUnit; pInterval->precision = pStmt->pOptions->tsPrecision; return TSDB_CODE_SUCCESS; } -int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void ** pResRow) { +int32_t translatePostCreateSmaIndex(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) { int32_t code = TSDB_CODE_SUCCESS; SCreateIndexStmt* pStmt = (SCreateIndexStmt*)pQuery->pRoot; int64_t lastTs = 0; @@ -6053,7 +6061,7 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS toName(pCxt->pParseCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name); tNameGetFullDbName(&name, pReq->subDbName); tNameExtractFullName(&name, pReq->subStbName); - if(pStmt->pQuery != NULL) { + if (pStmt->pQuery != NULL) { code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL); } } else if ('\0' != pStmt->subDbName[0]) { @@ -6108,11 +6116,12 @@ static EDealRes checkColumnTagsInCond(SNode* pNode, void* pContext) { addTagList(&pCxt->pTags, nodesCloneNode(pNode)); } } - + return DEAL_RES_CONTINUE; } -static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, SNodeList** ppProjection) { +static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, + SNodeList** ppProjection) { SBuildTopicContext colCxt = {.colExists = false, .colNotFound = false, .pMeta = pMeta, .pTags = NULL}; nodesWalkExprPostOrder(pStmt->pWhere, checkColumnTagsInCond, &colCxt); if (colCxt.colNotFound) { @@ -6122,18 +6131,18 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* nodesDestroyList(colCxt.pTags); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Columns are forbidden in where clause"); } - if (NULL == colCxt.pTags) { // put one column to 
select -// for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { - SSchema* column = &pMeta->schema[0]; - SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); - if (NULL == col) { - return TSDB_CODE_OUT_OF_MEMORY; - } - strcpy(col->colName, column->name); - strcpy(col->node.aliasName, col->colName); - strcpy(col->node.userAlias, col->colName); - addTagList(&colCxt.pTags, (SNode*)col); -// } + if (NULL == colCxt.pTags) { // put one column to select + // for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { + SSchema* column = &pMeta->schema[0]; + SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN); + if (NULL == col) { + return TSDB_CODE_OUT_OF_MEMORY; + } + strcpy(col->colName, column->name); + strcpy(col->node.aliasName, col->colName); + strcpy(col->node.userAlias, col->colName); + addTagList(&colCxt.pTags, (SNode*)col); + // } } *ppProjection = colCxt.pTags; @@ -6141,13 +6150,13 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* } static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt* pStmt, SNode** pSelect) { - SParseContext* pParCxt = pCxt->pParseCxt; - SRequestConnInfo connInfo = {.pTrans = pParCxt->pTransporter, - .requestId = pParCxt->requestId, + SParseContext* pParCxt = pCxt->pParseCxt; + SRequestConnInfo connInfo = {.pTrans = pParCxt->pTransporter, + .requestId = pParCxt->requestId, .requestObjRefId = pParCxt->requestRid, .mgmtEps = pParCxt->mgmtEpSet}; - SName name; - STableMeta* pMeta = NULL; + SName name; + STableMeta* pMeta = NULL; int32_t code = getTableMetaImpl(pCxt, toName(pParCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name), &pMeta); if (code) { taosMemoryFree(pMeta); @@ -6156,7 +6165,7 @@ static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt if (TSDB_SUPER_TABLE != pMeta->tableType) { taosMemoryFree(pMeta); return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Only supertable table can be 
used"); - } + } SNodeList* pProjection = NULL; code = checkCollectTopicTags(pCxt, pStmt, pMeta, &pProjection); @@ -6554,7 +6563,8 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "SUBTABLE expression must be of VARCHAR type"); } - if (NULL != pSelect->pSubtable && 0 == LIST_LENGTH(pSelect->pPartitionByList) && subtableExprHasColumnOrPseudoColumn(pSelect->pSubtable)) { + if (NULL != pSelect->pSubtable && 0 == LIST_LENGTH(pSelect->pPartitionByList) && + subtableExprHasColumnOrPseudoColumn(pSelect->pSubtable)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "SUBTABLE expression must not has column when no partition by clause"); } @@ -6910,28 +6920,28 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta if (NULL == col) { return TSDB_CODE_OUT_OF_MEMORY; } - + strcpy(col->tableAlias, pTable); strcpy(col->colName, pMeta->schema[0].name); SNodeList* pParamterList = nodesMakeList(); if (NULL == pParamterList) { - nodesDestroyNode((SNode *)col); + nodesDestroyNode((SNode*)col); return TSDB_CODE_OUT_OF_MEMORY; } - - int32_t code = nodesListStrictAppend(pParamterList, (SNode *)col); + + int32_t code = nodesListStrictAppend(pParamterList, (SNode*)col); if (code) { - nodesDestroyNode((SNode *)col); + nodesDestroyNode((SNode*)col); nodesDestroyList(pParamterList); return code; } - + SNode* pFunc = (SNode*)createFunction("last", pParamterList); if (NULL == pFunc) { nodesDestroyList(pParamterList); return TSDB_CODE_OUT_OF_MEMORY; } - + SNodeList* pProjectionList = nodesMakeList(); if (NULL == pProjectionList) { nodesDestroyList(pParamterList); @@ -6943,8 +6953,8 @@ static int32_t createLastTsSelectStmt(char* pDb, char* pTable, STableMeta* pMeta nodesDestroyList(pProjectionList); return code; } - - code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt **)pQuery); + + 
code = createSimpleSelectStmtFromProjList(pDb, pTable, pProjectionList, (SSelectStmt**)pQuery); if (code) { nodesDestroyList(pProjectionList); return code; @@ -6982,14 +6992,14 @@ static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt if (TSDB_CODE_SUCCESS == code && pStmt->pOptions->fillHistory) { SRealTableNode* pTable = (SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable); code = createLastTsSelectStmt(pTable->table.dbName, pTable->table.tableName, pTable->pMeta, &pStmt->pPrevQuery); -/* - if (TSDB_CODE_SUCCESS == code) { - STranslateContext cxt = {0}; - int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); - code = translateQuery(&cxt, pStmt->pPrevQuery); - destroyTranslateContext(&cxt); - } -*/ + /* + if (TSDB_CODE_SUCCESS == code) { + STranslateContext cxt = {0}; + int32_t code = initTranslateContext(pCxt->pParseCxt, pCxt->pMetaCache, &cxt); + code = translateQuery(&cxt, pStmt->pPrevQuery); + destroyTranslateContext(&cxt); + } + */ } taosMemoryFree(pMeta); return code; @@ -7084,7 +7094,7 @@ static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow)) { return code; } - + SIntervalWindowNode* pWindow = (SIntervalWindowNode*)pSelect->pWindow; pInterval->interval = ((SValueNode*)pWindow->pInterval)->datum.i; pInterval->intervalUnit = ((SValueNode*)pWindow->pInterval)->unit; @@ -7092,16 +7102,16 @@ static int32_t buildIntervalForCreateStream(SCreateStreamStmt* pStmt, SInterval* pInterval->sliding = (NULL != pWindow->pSliding ? ((SValueNode*)pWindow->pSliding)->datum.i : pInterval->interval); pInterval->slidingUnit = (NULL != pWindow->pSliding ? 
((SValueNode*)pWindow->pSliding)->unit : pInterval->intervalUnit); - pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision; + pInterval->precision = ((SColumnNode*)pWindow->pCol)->node.resType.precision; return code; } int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void** pResRow) { SCreateStreamStmt* pStmt = (SCreateStreamStmt*)pQuery->pRoot; - STranslateContext cxt = {0}; - SInterval interval = {0}; - int64_t lastTs = 0; + STranslateContext cxt = {0}; + SInterval interval = {0}; + int64_t lastTs = 0; int32_t code = initTranslateContext(pParseCxt, NULL, &cxt); if (TSDB_CODE_SUCCESS == code) { @@ -7136,7 +7146,6 @@ int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, void return code; } - static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pStmt) { SMDropStreamReq dropReq = {0}; SName name; @@ -7261,7 +7270,7 @@ static int32_t translateGrantTagCond(STranslateContext* pCxt, SGrantStmt* pStmt, } } - int32_t code = createRealTableForGrantTable(pStmt, &pTable); + int32_t code = createRealTableForGrantTable(pStmt, &pTable); if (TSDB_CODE_SUCCESS == code) { SName name; code = getTableMetaImpl(pCxt, toName(pCxt->pParseCxt->acctId, pTable->table.dbName, pTable->table.tableName, &name), @@ -7821,7 +7830,8 @@ static SNodeList* createProjectCols(int32_t ncols, const char* const pCols[]) { return pProjections; } -static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { +static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt) { SSelectStmt* pSelect = (SSelectStmt*)nodesMakeNode(QUERY_NODE_SELECT_STMT); if (NULL == pSelect) { return TSDB_CODE_OUT_OF_MEMORY; @@ -7844,9 +7854,8 @@ static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, S return TSDB_CODE_SUCCESS; } - static int32_t 
createSimpleSelectStmtFromCols(const char* pDb, const char* pTable, int32_t numOfProjs, - const char* const pProjCol[], SSelectStmt** pStmt) { + const char* const pProjCol[], SSelectStmt** pStmt) { SNodeList* pProjectionList = NULL; if (numOfProjs >= 0) { pProjectionList = createProjectCols(numOfProjs, pProjCol); @@ -7858,13 +7867,15 @@ static int32_t createSimpleSelectStmtFromCols(const char* pDb, const char* pTabl return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt); } -static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { +static int32_t createSimpleSelectStmtFromProjList(const char* pDb, const char* pTable, SNodeList* pProjectionList, + SSelectStmt** pStmt) { return createSimpleSelectStmtImpl(pDb, pTable, pProjectionList, pStmt); } static int32_t createSelectStmtForShow(ENodeType showType, SSelectStmt** pStmt) { const SSysTableShowAdapter* pShow = &sysTableShowAdapter[showType - SYSTABLE_SHOW_TYPE_OFFSET]; - return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, pStmt); + return createSimpleSelectStmtFromCols(pShow->pDbName, pShow->pTableName, pShow->numOfShowCols, pShow->pShowCols, + pStmt); } static int32_t createSelectStmtForShowTableDist(SShowTableDistributedStmt* pStmt, SSelectStmt** pOutput) { @@ -8002,8 +8013,8 @@ static int32_t createShowTableTagsProjections(SNodeList** pProjections, SNodeLis static int32_t rewriteShowStableTags(STranslateContext* pCxt, SQuery* pQuery) { SShowTableTagsStmt* pShow = (SShowTableTagsStmt*)pQuery->pRoot; SSelectStmt* pSelect = NULL; - int32_t code = createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal, - -1, NULL, &pSelect); + int32_t code = createSimpleSelectStmtFromCols(((SValueNode*)pShow->pDbName)->literal, + ((SValueNode*)pShow->pTbName)->literal, -1, NULL, &pSelect); if (TSDB_CODE_SUCCESS == code) { code 
= createShowTableTagsProjections(&pSelect->pProjectionList, &pShow->pTags); } From 60191bef9eb67c3ff2c7c2281d35b06e3178c145 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 14 Aug 2023 16:24:56 +0800 Subject: [PATCH 102/147] fix:tmq interface & remove snapshot code --- docs/en/14-reference/03-connector/04-java.mdx | 3 -- docs/examples/c/tmq_example.c | 6 --- docs/examples/go/sub/main.go | 1 - docs/zh/08-connector/10-cpp.mdx | 9 ++++ examples/c/tmq.c | 5 -- include/client/taos.h | 54 ++++++++----------- 6 files changed, 31 insertions(+), 47 deletions(-) diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx index 69bbd287ed..b12ef38ea8 100644 --- a/docs/en/14-reference/03-connector/04-java.mdx +++ b/docs/en/14-reference/03-connector/04-java.mdx @@ -1091,8 +1091,6 @@ public abstract class ConsumerLoop { config.setProperty("client.id", "1"); config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer"); config.setProperty("value.deserializer.encoding", "UTF-8"); - config.setProperty("experimental.snapshot.enable", "true"); - this.consumer = new TaosConsumer<>(config); this.topics = Collections.singletonList("topic_speed"); @@ -1176,7 +1174,6 @@ public abstract class ConsumerLoop { config.setProperty("client.id", "1"); config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer"); config.setProperty("value.deserializer.encoding", "UTF-8"); - config.setProperty("experimental.snapshot.enable", "true"); this.consumer = new TaosConsumer<>(config); this.topics = Collections.singletonList("topic_speed"); diff --git a/docs/examples/c/tmq_example.c b/docs/examples/c/tmq_example.c index d958428b8f..ca7c23e93b 100644 --- a/docs/examples/c/tmq_example.c +++ b/docs/examples/c/tmq_example.c @@ -227,12 +227,6 @@ tmq_t* build_consumer() { return NULL; } - code = tmq_conf_set(conf, "experimental.snapshot.enable", "false"); - if 
(TMQ_CONF_OK != code) { - tmq_conf_destroy(conf); - return NULL; - } - tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); diff --git a/docs/examples/go/sub/main.go b/docs/examples/go/sub/main.go index cb24e351ab..ed335cfdea 100644 --- a/docs/examples/go/sub/main.go +++ b/docs/examples/go/sub/main.go @@ -35,7 +35,6 @@ func main() { "td.connect.port": "6030", "client.id": "test_tmq_client", "enable.auto.commit": "false", - "experimental.snapshot.enable": "true", "msg.with.table.name": "true", }) if err != nil { diff --git a/docs/zh/08-connector/10-cpp.mdx b/docs/zh/08-connector/10-cpp.mdx index 9c5095f09c..53c4bca755 100644 --- a/docs/zh/08-connector/10-cpp.mdx +++ b/docs/zh/08-connector/10-cpp.mdx @@ -515,3 +515,12 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多 - 带_raw的接口通过传递的参数lines指针和长度len来表示数据,为了解决原始接口数据包含'\0'而被截断的问题。totalRows指针返回解析出来的数据行数。 - 带_ttl的接口可以传递ttl参数来控制建表的ttl到期时间。 - 带_reqid的接口可以通过传递reqid参数来追踪整个的调用链。 + +### 数据订阅 API + +除了使用 SQL 方式或者使用参数绑定 API 写入数据外,还可以使用 Schemaless 的方式完成写入。Schemaless 可以免于预先创建超级表/数据子表的数据结构,而是可以直接写入数据,TDengine 系统会根据写入的数据内容自动创建和维护所需要的表结构。Schemaless 的使用方式详见 [Schemaless 写入](/reference/schemaless/) 章节,这里介绍与之配套使用的 C/C++ API。 + +- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)` + +**功能说明** +该接口将行协议的文本数据写入到 TDengine 中。 diff --git a/examples/c/tmq.c b/examples/c/tmq.c index e1133c109e..136c54b874 100644 --- a/examples/c/tmq.c +++ b/examples/c/tmq.c @@ -228,11 +228,6 @@ tmq_t* build_consumer() { tmq_conf_destroy(conf); return NULL; } - code = tmq_conf_set(conf, "experimental.snapshot.enable", "false"); - if (TMQ_CONF_OK != code) { - tmq_conf_destroy(conf); - return NULL; - } tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); tmq = tmq_consumer_new(conf, NULL, 0); diff --git a/include/client/taos.h b/include/client/taos.h index 3cc2d907ab..a8136461f8 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -260,19 
+260,14 @@ typedef struct tmq_t tmq_t; typedef struct tmq_conf_t tmq_conf_t; typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); +typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); -DLL_EXPORT tmq_list_t *tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); -DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); +typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +} tmq_conf_res_t; -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); - -DLL_EXPORT const char *tmq_err2str(int32_t code); - -/* ------------------------TMQ CONSUMER INTERFACE------------------------ */ typedef struct tmq_topic_assignment { int32_t vgId; int64_t currentOffset; @@ -280,6 +275,18 @@ typedef struct tmq_topic_assignment { int64_t end; } tmq_topic_assignment; +DLL_EXPORT tmq_conf_t *tmq_conf_new(); +DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); +DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); +DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + +DLL_EXPORT tmq_list_t *tmq_list_new(); +DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); +DLL_EXPORT void tmq_list_destroy(tmq_list_t *); +DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); +DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); + +DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); @@ -289,34 +296,17 @@ DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); DLL_EXPORT void 
tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); -DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, - int32_t *numOfAssignment); +DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment); DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment); DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); +DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); +DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res); DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); -DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); -DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); - -/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */ - -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; - -typedef enum tmq_conf_res_t tmq_conf_res_t; - -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); - -/* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */ +DLL_EXPORT const char *tmq_err2str(int32_t code); /* 
------------------------------ TAOSX -----------------------------------*/ // note: following apis are unstable From f9c897221cc66cd49bfeaa0905e802a305aece6a Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 14 Aug 2023 16:30:31 +0800 Subject: [PATCH 103/147] fix: move the setting of onlyCtbIdx to front end --- source/libs/executor/src/operator.c | 1 - source/libs/planner/src/planPhysiCreater.c | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index abef8298e5..7f0c5baa36 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -371,7 +371,6 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR pOperator = createTableCountScanOperatorInfo(pHandle, pTblCountScanNode, pTaskInfo); } else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) { STagScanPhysiNode* pTagScanPhyNode = (STagScanPhysiNode*)pPhyNode; - pTagScanPhyNode->onlyMetaCtbIdx = false; STableListInfo* pTableListInfo = tableListCreate(); if (!pTagScanPhyNode->onlyMetaCtbIdx) { int32_t code = createScanTableListInfo((SScanPhysiNode*)pTagScanPhyNode, NULL, false, pHandle, pTableListInfo, pTagCond, diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 06859e195d..8efa9c1048 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -511,6 +511,20 @@ static int32_t createSimpleScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSub return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, pScan, pPhyNode); } +static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, + SPhysiNode** pPhyNode) { + STagScanPhysiNode* pScan = + (STagScanPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pScanLogicNode, QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN); + if (NULL == pScan) { + return TSDB_CODE_OUT_OF_MEMORY; 
+ } + vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); + + pScan->onlyMetaCtbIdx = false; + + return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pScan, pPhyNode); +} + static int32_t createLastRowScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, SScanLogicNode* pScanLogicNode, SPhysiNode** pPhyNode) { SLastRowScanPhysiNode* pScan = @@ -646,6 +660,7 @@ static int32_t createScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan, pCxt->hasScan = true; switch (pScanLogicNode->scanType) { case SCAN_TYPE_TAG: + return createTagScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode); case SCAN_TYPE_BLOCK_INFO: return createSimpleScanPhysiNode(pCxt, pSubplan, pScanLogicNode, pPhyNode); case SCAN_TYPE_TABLE_COUNT: From 8d0461e98ca13a58c4bee1b9964a1ee3920b5346 Mon Sep 17 00:00:00 2001 From: kailixu Date: Mon, 14 Aug 2023 16:31:58 +0800 Subject: [PATCH 104/147] fix: use taos_static for tmq_sim on windows --- cmake/cmake.define | 6 ++++++ tests/script/wtest.bat | 14 +++++++++++--- utils/test/c/CMakeLists.txt | 2 +- utils/tsim/CMakeLists.txt | 2 +- 4 files changed, 19 insertions(+), 5 deletions(-) diff --git a/cmake/cmake.define b/cmake/cmake.define index cf7f450994..6f4153c7d0 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -78,6 +78,12 @@ ELSE () SET(TD_TAOS_TOOLS TRUE) ENDIF () +IF (${TD_WINDOWS}) + SET(TAOS_LIB taos_static) +ELSE () + SET(TAOS_LIB taos) +ENDIF () + IF (TD_WINDOWS) MESSAGE("${Yellow} set compiler flag for Windows! 
${ColourReset}") SET(COMMON_FLAGS "/w /D_WIN32 /DWIN32 /Zi /MTd") diff --git a/tests/script/wtest.bat b/tests/script/wtest.bat index b642bad285..88ae703b7c 100644 --- a/tests/script/wtest.bat +++ b/tests/script/wtest.bat @@ -17,6 +17,9 @@ rem echo SIM_DIR: %SIM_DIR% set "TSIM_DIR=%SIM_DIR%tsim\" rem echo TSIM_DIR: %TSIM_DIR% +set "DATA_DIR=%TSIM_DIR%data\" +rem echo DATA_DIR: %DATA_DIR% + set "CFG_DIR=%TSIM_DIR%cfg\" rem echo CFG_DIR: %CFG_DIR% @@ -30,25 +33,30 @@ if not exist %SIM_DIR% mkdir %SIM_DIR% if not exist %TSIM_DIR% mkdir %TSIM_DIR% if exist %CFG_DIR% rmdir /s/q %CFG_DIR% if exist %LOG_DIR% rmdir /s/q %LOG_DIR% +if exist %DATA_DIR% rmdir /s/q %DATA_DIR% if not exist %CFG_DIR% mkdir %CFG_DIR% if not exist %LOG_DIR% mkdir %LOG_DIR% +if not exist %DATA_DIR% mkdir %DATA_DIR% set "fqdn=localhost" for /f "skip=1" %%A in ( 'wmic computersystem get caption' ) do if not defined fqdn set "fqdn=%%A" -echo firstEp %fqdn% > %TAOS_CFG% +echo firstEp %fqdn%:7100 > %TAOS_CFG% +echo secondEp %fqdn%:7200 >> %TAOS_CFG% echo fqdn %fqdn% >> %TAOS_CFG% echo serverPort 7100 >> %TAOS_CFG% +echo dataDir %DATA_DIR% >> %TAOS_CFG% echo logDir %LOG_DIR% >> %TAOS_CFG% echo scriptDir %SCRIPT_DIR% >> %TAOS_CFG% echo numOfLogLines 100000000 >> %TAOS_CFG% echo rpcDebugFlag 143 >> %TAOS_CFG% echo tmrDebugFlag 131 >> %TAOS_CFG% -echo cDebugFlag 135 >> %TAOS_CFG% +echo cDebugFlag 143 >> %TAOS_CFG% echo qDebugFlag 143 >> %TAOS_CFG% -echo udebugFlag 135 >> %TAOS_CFG% +echo uDebugFlag 143 >> %TAOS_CFG% +echo debugFlag 143 >> %TAOS_CFG% echo wal 0 >> %TAOS_CFG% echo asyncLog 0 >> %TAOS_CFG% echo locale en_US.UTF-8 >> %TAOS_CFG% diff --git a/utils/test/c/CMakeLists.txt b/utils/test/c/CMakeLists.txt index 3f52fc8e5d..b96814c13b 100644 --- a/utils/test/c/CMakeLists.txt +++ b/utils/test/c/CMakeLists.txt @@ -31,7 +31,7 @@ target_link_libraries( ) target_link_libraries( tmq_sim - PUBLIC taos + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os diff --git a/utils/tsim/CMakeLists.txt 
b/utils/tsim/CMakeLists.txt index c2cf7ac3c5..209982c659 100644 --- a/utils/tsim/CMakeLists.txt +++ b/utils/tsim/CMakeLists.txt @@ -2,7 +2,7 @@ aux_source_directory(src TSIM_SRC) add_executable(tsim ${TSIM_SRC}) target_link_libraries( tsim - PUBLIC taos_static + PUBLIC ${TAOS_LIB} PUBLIC util PUBLIC common PUBLIC os From 2e6263b43f5102f108fd38ce3a14ab590d3580d8 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 14 Aug 2023 17:16:11 +0800 Subject: [PATCH 105/147] fix(stream): fix memory leak. --- source/dnode/vnode/src/tq/tqRestore.c | 5 +++-- source/libs/stream/src/streamDispatch.c | 4 ++-- source/libs/stream/src/streamExec.c | 17 +++++++++++------ source/libs/stream/src/streamRecover.c | 1 - 4 files changed, 16 insertions(+), 11 deletions(-) diff --git a/source/dnode/vnode/src/tq/tqRestore.c b/source/dnode/vnode/src/tq/tqRestore.c index 3054179416..d363031db1 100644 --- a/source/dnode/vnode/src/tq/tqRestore.c +++ b/source/dnode/vnode/src/tq/tqRestore.c @@ -211,12 +211,13 @@ int32_t doSetOffsetForWalReader(SStreamTask *pTask, int32_t vgId) { static void checkForFillHistoryVerRange(SStreamTask* pTask, int64_t ver) { const char* id = pTask->id.idStr; + int64_t maxVer = pTask->dataRange.range.maxVer; if ((pTask->info.fillHistory == 1) && ver > pTask->dataRange.range.maxVer) { if (!pTask->status.appendTranstateBlock) { qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64 ", not scan wal anymore, add transfer-state block into inputQ", - id, ver, pTask->dataRange.range.maxVer); + id, ver, maxVer); double el = (taosGetTimestampMs() - pTask->tsInfo.step2Start) / 1000.0; qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, el); @@ -224,7 +225,7 @@ static void checkForFillHistoryVerRange(SStreamTask* pTask, int64_t ver) { /*int32_t code = */streamSchedExec(pTask); } else { qWarn("s-task:%s fill-history scan WAL, currentVer:%" PRId64 " reach the maximum ver:%" PRId64 ", not scan wal", - id, ver, 
pTask->dataRange.range.maxVer); + id, ver, maxVer); } } } diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 06861454d1..557b92baf9 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -757,10 +757,10 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i qDebug("s-task:%s dispatch transtate msg to downstream successfully, start to transfer state", id); ASSERT(pTask->info.fillHistory == 1); code = streamTransferStateToStreamTask(pTask); - if (code != TSDB_CODE_SUCCESS) { // todo: do nothing if error happens -// atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); } + + streamFreeQitem(pTask->msgInfo.pData); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index ccfa331661..cebae0801d 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -534,16 +534,21 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock } else { streamFreeQitem((SStreamQueueItem*)pBlock); } + } else { // level == TASK_LEVEL__SINK + streamFreeQitem((SStreamQueueItem*)pBlock); } } else { // non-dispatch task, do task state transfer directly - qDebug("s-task:%s non-dispatch task, start to transfer state directly", id); - streamFreeQitem((SStreamQueueItem*)pBlock); - ASSERT(pTask->info.fillHistory == 1); - code = streamTransferStateToStreamTask(pTask); + if (level != TASK_LEVEL__SINK) { + qDebug("s-task:%s non-dispatch task, start to transfer state directly", id); + ASSERT(pTask->info.fillHistory == 1); + code = streamTransferStateToStreamTask(pTask); - if (code != TSDB_CODE_SUCCESS) { - atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); + if (code != TSDB_CODE_SUCCESS) { + atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); + } + } else { + qDebug("s-task:%d sink task does not 
transfer state", id); } } diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index c3d4d4c7ae..0a1a15259c 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -400,7 +400,6 @@ int32_t appendTranstateIntoInputQ(SStreamTask* pTask) { } pTask->status.appendTranstateBlock = true; - qDebug("s-task:%s set sched-status:%d, prev:%d", pTask->id.idStr, TASK_SCHED_STATUS__INACTIVE, pTask->status.schedStatus); return TSDB_CODE_SUCCESS; } From 62cad7c54f9a1784a03949c152c0bdb684f21316 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 14 Aug 2023 17:16:11 +0800 Subject: [PATCH 106/147] docs:tmq interface --- docs/en/07-develop/07-tmq.mdx | 88 ++++++++++++++++++++++----------- docs/zh/07-develop/07-tmq.mdx | 88 +++++++++++++++++++++++---------- docs/zh/08-connector/10-cpp.mdx | 9 ---- 3 files changed, 121 insertions(+), 64 deletions(-) diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx index ccf39ef581..3326164f49 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -23,7 +23,20 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. 
TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers. -Tips: Data subscription is to consume data from the wal. If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products. +Tips:(c interface for example) +1. A consumption group consumes all data under the same topic, and different consumption groups are independent of each other; +2. A consumption group consumes all vgroups of the same topic, which can be composed of multiple consumers, but a vgroup is only consumed by one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data; +3. On the server side, only one offset is saved for each vgroup, and the offsets for each vgroup are monotonically increasing, but not necessarily continuous. There is no correlation between the offsets of various vgroups; +4. Each poll server will return a result block, which belongs to a vgroup and may contain data from multiple versions of wal. This block can be accessed through tmq_get_vgroup_offset. The offset interface obtains the offset of the first record in the block; +5. If a consumer group has never committed an offset, when its member consumers restart and pull data again, they start consuming from the set value of the parameter auto.offset.reset; In a consumer lifecycle, the client locally records the offset of the most recent pull data and will not pull duplicate data; +6. 
If a consumer terminates abnormally (without calling tmq_close), they need to wait for about 12 seconds to trigger their consumer group rebalance. The consumer's status on the server will change to LOST, and after about 1 day, the consumer will be automatically deleted; Exit normally, and after exiting, the consumer will be deleted; Add a new consumer, wait for about 2 seconds to trigger Rebalance, and the consumer's status on the server will change to ready; +7. The consumer group Rebalance will reassign Vgroups to all consumer members in the ready state of the group, and consumers can only assign/see/commit/poll operations to the Vgroups they are responsible for; +8. Consumers can tmq_position to obtain the offset of the current consumption, seek to the specified offset, and consume again; +9. Seek points the position to the specified offset without executing the commit operation. Once the seek is successful, it can poll the specified offset and subsequent data; +10. Before the seek operation, tmq must be call tmq_get_topic_assignment, The assignment interface obtains the vgroup ID and offset range of the consumer. The seek operation will detect whether the vgroup ID and offset are legal, and if they are illegal, an error will be reported; +11. Due to the existence of a WAL expiration deletion mechanism, even if the seek operation is successful, it is possible that the offset has expired when polling data. If the offset of poll is less than the WAL minimum version number, it will be consumed from the WAL minimum version number; +12. The tmq_get_vgroup_offset interface obtains the offset of the first data in the result block where the record is located. When seeking to this offset, it will consume all the data in this block. Refer to point four; +13. Data subscription is to consume data from the wal. If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. 
So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products. ## Data Schema and API @@ -33,40 +46,59 @@ The related schemas and APIs in various languages are described as follows: ```c -typedef struct tmq_t tmq_t; -typedef struct tmq_conf_t tmq_conf_t; -typedef struct tmq_list_t tmq_list_t; + typedef struct tmq_t tmq_t; + typedef struct tmq_conf_t tmq_conf_t; + typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); + typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); -DLL_EXPORT tmq_list_t *tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); -DLL_EXPORT const char *tmq_err2str(int32_t code); + typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +} tmq_conf_res_t; -DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); -DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); -DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); -DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); -DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); -DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + typedef struct tmq_topic_assignment { + int32_t vgId; + int64_t currentOffset; + int64_t begin; + int64_t end; +} tmq_topic_assignment; -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; -typedef enum tmq_conf_res_t tmq_conf_res_t; + DLL_EXPORT tmq_conf_t *tmq_conf_new(); + DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const 
char *key, const char *value); + DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); + DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + DLL_EXPORT tmq_list_t *tmq_list_new(); + DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); + DLL_EXPORT void tmq_list_destroy(tmq_list_t *); + DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); + DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); + + DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); + DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); + DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); + DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); + DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); + DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); + DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); + DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment); + DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment); + DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); + 
DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); + + DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); + DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res); + DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); + DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); + DLL_EXPORT const char *tmq_err2str(int32_t code);DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); ``` -For more information, see [C/C++ Connector](/reference/connector/cpp). - The following example is based on the smart meter table described in Data Models. For complete sample code, see the C language section below. diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx index 38b91d7cea..6852d5551e 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.mdx @@ -25,7 +25,20 @@ import CDemo from "./_sub_c.mdx"; 本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。 -注意:数据订阅是从 WAL 消费数据,如果一些 WAL 文件被基于 WAL 保留策略删除,则已经删除的 WAL 文件中的数据就无法再消费到。需要根据业务需要在创建数据库时合理设置 `WAL_RETENTION_PERIOD` 或 `WAL_RETENTION_SIZE` ,并确保应用及时消费数据,这样才不会产生数据丢失的现象。数据订阅的行为与 Kafka 等广泛使用的消息队列类产品的行为相似。 +说明(以c接口为例): +1. 一个消费组消费同一个topic下的所有数据,不同消费组之间相互独立; +2. 一个消费组消费同一个topic所有的vgroup,消费组可由多个消费者组成,但一个vgroup仅被一个消费者消费,如果消费者数量超过了vgroup数量,多余的消费者不消费数据; +3. 在服务端每个vgroup仅保存一个offset,每个vgroup的offset是单调递增的,但不一定连续。各个vgroup的offset之间没有关联; +4. 每次poll服务端会返回一个结果block,该block属于一个vgroup,可能包含多个wal版本的数据,可以通过 tmq_get_vgroup_offset 接口获得是该block第一条记录的offset; +5. 一个消费组如果从未commit过offset,当其成员消费者重启重新拉取数据时,均从参数auto.offset.reset设定值开始消费;在一个消费者生命周期中,客户端本地记录了最近一次拉取数据的offset,不会拉取重复数据; +6. 消费者如果异常终止(没有调用tmq_close),需等约12秒后触发其所属消费组rebalance,该消费者在服务端状态变为LOST,约1天后该消费者自动被删除;正常退出,退出后就会删除消费者;新增消费者,需等约2秒触发rebalance,该消费者在服务端状态变为ready; +7. 消费组rebalance会对该组所有ready状态的消费者成员重新进行vgroup分配,消费者仅能对自己负责的vgroup进行assignment/seek/commit/poll操作; +8. 消费者可利用 tmq_position 获得当前消费的offset,并seek到指定offset,重新消费; +9. seek将position指向指定offset,不执行commit操作,一旦seek成功,可poll拉取指定offset及以后的数据; +10. 
seek 操作之前须调用 tmq_get_topic_assignment 接口获取该consumer的vgroup ID和offset范围。seek 操作会检测vgroup ID 和 offset是否合法,如非法将报错; +11. tmq_get_vgroup_offset接口获取的是记录所在结果block块里的第一条数据的offset,当seek至该offset时,将消费到这个block里的全部数据。参见第四点; +12. 由于存在 WAL 过期删除机制,即使seek 操作成功,poll数据时有可能offset已失效。如果poll 的offset 小于 WAL 最小版本号,将会从WAL最小版本号消费; +13. 数据订阅是从 WAL 消费数据,如果一些 WAL 文件被基于 WAL 保留策略删除,则已经删除的 WAL 文件中的数据就无法再消费到。需要根据业务需要在创建数据库时合理设置 `WAL_RETENTION_PERIOD` 或 `WAL_RETENTION_SIZE` ,并确保应用及时消费数据,这样才不会产生数据丢失的现象。数据订阅的行为与 Kafka 等广泛使用的消息队列类产品的行为相似; ## 主要数据结构和 API @@ -35,39 +48,60 @@ import CDemo from "./_sub_c.mdx"; ```c -typedef struct tmq_t tmq_t; -typedef struct tmq_conf_t tmq_conf_t; -typedef struct tmq_list_t tmq_list_t; + typedef struct tmq_t tmq_t; + typedef struct tmq_conf_t tmq_conf_t; + typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); + typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); -DLL_EXPORT tmq_list_t *tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); -DLL_EXPORT const char *tmq_err2str(int32_t code); + typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +} tmq_conf_res_t; -DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); -DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); -DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); -DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); -DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); -DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + typedef struct tmq_topic_assignment { + int32_t vgId; + int64_t currentOffset; + int64_t begin; + int64_t end; +} tmq_topic_assignment; -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; 
-typedef enum tmq_conf_res_t tmq_conf_res_t; + DLL_EXPORT tmq_conf_t *tmq_conf_new(); + DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); + DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); + DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + DLL_EXPORT tmq_list_t *tmq_list_new(); + DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); + DLL_EXPORT void tmq_list_destroy(tmq_list_t *); + DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); + DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); + + DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); + DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); + DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); + DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); + DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); + DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); + DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); + DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment); + DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment); + DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, 
const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); + DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); + + DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); + DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res); + DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); + DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); + DLL_EXPORT const char *tmq_err2str(int32_t code);DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); ``` -这些 API 的文档请见 [C/C++ Connector](../../connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。 +下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。 diff --git a/docs/zh/08-connector/10-cpp.mdx b/docs/zh/08-connector/10-cpp.mdx index 53c4bca755..9c5095f09c 100644 --- a/docs/zh/08-connector/10-cpp.mdx +++ b/docs/zh/08-connector/10-cpp.mdx @@ -515,12 +515,3 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多 - 带_raw的接口通过传递的参数lines指针和长度len来表示数据,为了解决原始接口数据包含'\0'而被截断的问题。totalRows指针返回解析出来的数据行数。 - 带_ttl的接口可以传递ttl参数来控制建表的ttl到期时间。 - 带_reqid的接口可以通过传递reqid参数来追踪整个的调用链。 - -### 数据订阅 API - -除了使用 SQL 方式或者使用参数绑定 API 写入数据外,还可以使用 Schemaless 的方式完成写入。Schemaless 可以免于预先创建超级表/数据子表的数据结构,而是可以直接写入数据,TDengine 系统会根据写入的数据内容自动创建和维护所需要的表结构。Schemaless 的使用方式详见 [Schemaless 写入](/reference/schemaless/) 章节,这里介绍与之配套使用的 C/C++ API。 - -- `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)` - -**功能说明** -该接口将行协议的文本数据写入到 TDengine 中。 From 90cb67d006693a86cfc52b8675f2a98f451be537 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Mon, 14 Aug 2023 17:33:13 +0800 Subject: [PATCH 107/147] fix: table count issue --- tests/develop-test/2-query/table_count_scan.py | 16 ++++++++-------- tests/script/tsim/query/sys_tbname.sim | 2 +- tests/script/tsim/query/tableCount.sim | 6 +++--- tests/system-test/0-others/information_schema.py | 2 +- 4 files changed, 13 
insertions(+), 13 deletions(-) diff --git a/tests/develop-test/2-query/table_count_scan.py b/tests/develop-test/2-query/table_count_scan.py index 758d28948d..d6e49964eb 100644 --- a/tests/develop-test/2-query/table_count_scan.py +++ b/tests/develop-test/2-query/table_count_scan.py @@ -65,7 +65,7 @@ class TDTestCase: tdSql.query('select count(*),db_name, stable_name from information_schema.ins_tables group by db_name, stable_name;') tdSql.checkRows(3) - tdSql.checkData(0, 0, 24) + tdSql.checkData(0, 0, 23) tdSql.checkData(0, 1, 'information_schema') tdSql.checkData(0, 2, None) tdSql.checkData(1, 0, 3) @@ -77,7 +77,7 @@ class TDTestCase: tdSql.query('select count(1) v,db_name, stable_name from information_schema.ins_tables group by db_name, stable_name order by v desc;') tdSql.checkRows(3) - tdSql.checkData(0, 0, 24) + tdSql.checkData(0, 0, 23) tdSql.checkData(0, 1, 'information_schema') tdSql.checkData(0, 2, None) tdSql.checkData(1, 0, 5) @@ -93,7 +93,7 @@ class TDTestCase: tdSql.checkData(1, 1, 'performance_schema') tdSql.checkData(0, 0, 3) tdSql.checkData(0, 1, 'tbl_count') - tdSql.checkData(2, 0, 24) + tdSql.checkData(2, 0, 23) tdSql.checkData(2, 1, 'information_schema') tdSql.query("select count(*) from information_schema.ins_tables where db_name='tbl_count'") @@ -106,7 +106,7 @@ class TDTestCase: tdSql.query('select count(*) from information_schema.ins_tables') tdSql.checkRows(1) - tdSql.checkData(0, 0, 32) + tdSql.checkData(0, 0, 31) tdSql.execute('create table stba (ts timestamp, c1 bool, c2 tinyint, c3 smallint, c4 int, c5 bigint, c6 float, c7 double, c8 binary(10), c9 nchar(10), c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) TAGS(t1 int, t2 binary(10), t3 double);') @@ -189,7 +189,7 @@ class TDTestCase: tdSql.checkData(2, 0, 5) tdSql.checkData(2, 1, 'performance_schema') tdSql.checkData(2, 2, None) - tdSql.checkData(3, 0, 24) + tdSql.checkData(3, 0, 23) tdSql.checkData(3, 1, 'information_schema') tdSql.checkData(3, 2, 
None) @@ -204,7 +204,7 @@ class TDTestCase: tdSql.checkData(2, 0, 5) tdSql.checkData(2, 1, 'performance_schema') tdSql.checkData(2, 2, None) - tdSql.checkData(3, 0, 24) + tdSql.checkData(3, 0, 23) tdSql.checkData(3, 1, 'information_schema') tdSql.checkData(3, 2, None) @@ -215,7 +215,7 @@ class TDTestCase: tdSql.checkData(0, 1, 'tbl_count') tdSql.checkData(1, 0, 5) tdSql.checkData(1, 1, 'performance_schema') - tdSql.checkData(2, 0, 24) + tdSql.checkData(2, 0, 23) tdSql.checkData(2, 1, 'information_schema') tdSql.query("select count(*) from information_schema.ins_tables where db_name='tbl_count'") @@ -228,7 +228,7 @@ class TDTestCase: tdSql.query('select count(*) from information_schema.ins_tables') tdSql.checkRows(1) - tdSql.checkData(0, 0, 33) + tdSql.checkData(0, 0, 32) tdSql.execute('drop database tbl_count') diff --git a/tests/script/tsim/query/sys_tbname.sim b/tests/script/tsim/query/sys_tbname.sim index f49a8e0a7d..6ed978aa15 100644 --- a/tests/script/tsim/query/sys_tbname.sim +++ b/tests/script/tsim/query/sys_tbname.sim @@ -58,7 +58,7 @@ endi sql select tbname from information_schema.ins_tables; print $rows $data00 -if $rows != 33 then +if $rows != 32 then return -1 endi if $data00 != @ins_tables@ then diff --git a/tests/script/tsim/query/tableCount.sim b/tests/script/tsim/query/tableCount.sim index 6e65852dcc..524b43620c 100644 --- a/tests/script/tsim/query/tableCount.sim +++ b/tests/script/tsim/query/tableCount.sim @@ -53,7 +53,7 @@ sql select stable_name,count(table_name) from information_schema.ins_tables grou if $rows != 3 then return -1 endi -if $data01 != 30 then +if $data01 != 29 then return -1 endi if $data11 != 10 then @@ -72,7 +72,7 @@ endi if $data11 != 5 then return -1 endi -if $data21 != 24 then +if $data21 != 23 then return -1 endi if $data31 != 5 then @@ -97,7 +97,7 @@ endi if $data42 != 3 then return -1 endi -if $data52 != 24 then +if $data52 != 23 then return -1 endi if $data62 != 5 then diff --git 
a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py index 762361f051..f7788d1d50 100644 --- a/tests/system-test/0-others/information_schema.py +++ b/tests/system-test/0-others/information_schema.py @@ -55,7 +55,7 @@ class TDTestCase: ] self.binary_str = 'taosdata' self.nchar_str = '涛思数据' - self.ins_list = ['ins_dnodes','ins_mnodes','ins_modules','ins_qnodes','ins_snodes','ins_cluster','ins_databases','ins_functions',\ + self.ins_list = ['ins_dnodes','ins_mnodes','ins_qnodes','ins_snodes','ins_cluster','ins_databases','ins_functions',\ 'ins_indexes','ins_stables','ins_tables','ins_tags','ins_columns','ins_users','ins_grants','ins_vgroups','ins_configs','ins_dnode_variables',\ 'ins_topics','ins_subscriptions','ins_streams','ins_stream_tasks','ins_vnodes','ins_user_privileges'] self.perf_list = ['perf_connections','perf_queries','perf_consumers','perf_trans','perf_apps'] From 1d33a8d4c09e0c3eaa48b08da506df3d1611aebf Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 14 Aug 2023 10:06:33 +0000 Subject: [PATCH 108/147] rebuild index at tag0 --- source/dnode/mnode/impl/src/mndStb.c | 43 ++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 70fd74afc0..903181282a 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -18,6 +18,7 @@ #include "mndDb.h" #include "mndDnode.h" #include "mndIndex.h" +#include "mndIndexComm.h" #include "mndInfoSchema.h" #include "mndMnode.h" #include "mndPerfSchema.h" @@ -822,7 +823,7 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat return -1; } - if(pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags){ + if (pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags) { terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW; return -1; } @@ -857,11 
+858,36 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea SStbObj stbObj = {0}; int32_t code = -1; + char fullIdxName[TSDB_INDEX_FNAME_LEN * 2] = {0}; + STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq, "create-stb"); if (pTrans == NULL) goto _OVER; mInfo("trans:%d, used to create stb:%s", pTrans->id, pCreate->name); if (mndBuildStbFromReq(pMnode, &stbObj, pCreate, pDb) != 0) goto _OVER; + + SSchema *pSchema = &(stbObj.pTags[0]); + sprintf(fullIdxName, "%s.%s_default", pDb->name, pSchema->name); + + SSIdx idx = {0}; + if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0) { + terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; + mndReleaseIdx(pMnode, idx.pIdx); + goto _OVER; + } + + SIdxObj idxObj; + memcpy(idxObj.name, fullIdxName, TSDB_INDEX_FNAME_LEN); + memcpy(idxObj.stb, stbObj.name, TSDB_TABLE_FNAME_LEN); + memcpy(idxObj.db, stbObj.db, TSDB_DB_FNAME_LEN); + memcpy(idxObj.colName, pSchema->name, TSDB_COL_NAME_LEN); + idxObj.createdTime = taosGetTimestampMs(); + idxObj.uid = mndGenerateUid(fullIdxName, strlen(fullIdxName)); + idxObj.stbUid = stbObj.uid; + idxObj.dbUid = stbObj.dbUid; + + if (mndSetCreateIdxCommitLogs(pMnode, pTrans, &idxObj) < 0) goto _OVER; + if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) goto _OVER; if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; code = 0; @@ -956,7 +982,7 @@ static int32_t mndBuildStbFromAlter(SStbObj *pStb, SStbObj *pDst, SMCreateStbReq return -1; } - if(pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags){ + if (pDst->nextColId < 0 || pDst->nextColId >= 0x7fff - pDst->numOfColumns - pDst->numOfTags) { terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW; return -1; } @@ -1188,7 +1214,7 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p return -1; } - if(pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ntags){ + if (pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - 
ntags) { terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW; return -1; } @@ -1478,7 +1504,8 @@ static int32_t mndAlterStbTagBytes(SMnode *pMnode, const SStbObj *pOld, SStbObj SSchema *pTag = pNew->pTags + tag; - if (!(pTag->type == TSDB_DATA_TYPE_BINARY || pTag->type == TSDB_DATA_TYPE_NCHAR || pTag->type == TSDB_DATA_TYPE_GEOMETRY)) { + if (!(pTag->type == TSDB_DATA_TYPE_BINARY || pTag->type == TSDB_DATA_TYPE_NCHAR || + pTag->type == TSDB_DATA_TYPE_GEOMETRY)) { terrno = TSDB_CODE_MND_INVALID_STB_OPTION; return -1; } @@ -1506,7 +1533,7 @@ static int32_t mndAddSuperTableColumn(const SStbObj *pOld, SStbObj *pNew, SArray return -1; } - if(pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ncols){ + if (pNew->nextColId < 0 || pNew->nextColId >= 0x7fff - ncols) { terrno = TSDB_CODE_MND_FIELD_VALUE_OVERFLOW; return -1; } @@ -1598,7 +1625,8 @@ static int32_t mndAlterStbColumnBytes(SMnode *pMnode, const SStbObj *pOld, SStbO } SSchema *pCol = pNew->pColumns + col; - if (!(pCol->type == TSDB_DATA_TYPE_BINARY || pCol->type == TSDB_DATA_TYPE_NCHAR || pCol->type == TSDB_DATA_TYPE_GEOMETRY)) { + if (!(pCol->type == TSDB_DATA_TYPE_BINARY || pCol->type == TSDB_DATA_TYPE_NCHAR || + pCol->type == TSDB_DATA_TYPE_GEOMETRY)) { terrno = TSDB_CODE_MND_INVALID_STB_OPTION; return -1; } @@ -3182,7 +3210,6 @@ static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB SSdb *pSdb = pMnode->pSdb; SStbObj *pStb = NULL; - int32_t numOfRows = 0; if (!pShow->sysDbRsp) { numOfRows = buildSysDbColsInfo(pBlock, pShow->db, pShow->filterTb); @@ -3206,7 +3233,7 @@ static int32_t mndRetrieveStbCol(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB if (pShow->pIter == NULL) break; } else { fetch = true; - void *pKey = taosHashGetKey(pShow->pIter, NULL); + void *pKey = taosHashGetKey(pShow->pIter, NULL); pStb = sdbAcquire(pSdb, SDB_STB, pKey); if (!pStb) continue; } From 5d3232d275f97ecf63205dc04c211b0887f5e39e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 14 Aug 2023 18:14:32 +0800 
Subject: [PATCH 109/147] fix(stream): fix an syntax error. --- source/libs/stream/src/streamExec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index cebae0801d..b513d7c13e 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -548,7 +548,7 @@ int32_t streamProcessTranstateBlock(SStreamTask* pTask, SStreamDataBlock* pBlock atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); } } else { - qDebug("s-task:%d sink task does not transfer state", id); + qDebug("s-task:%s sink task does not transfer state", id); } } From 012248b68142496f0e8e2fa1833b4df4912b2c82 Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 14 Aug 2023 19:26:53 +0800 Subject: [PATCH 110/147] fix: move the only ctb idx flag to logical plan --- include/libs/nodes/plannodes.h | 1 + source/libs/nodes/src/nodesCodeFuncs.c | 10 ++++++++-- source/libs/planner/src/planOptimizer.c | 3 +++ source/libs/planner/src/planPhysiCreater.c | 2 +- 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 0830dc4918..3e24e417fc 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -107,6 +107,7 @@ typedef struct SScanLogicNode { bool sortPrimaryKey; bool igLastNull; bool groupOrderScan; + bool onlyMetaCtbIdx; // for tag scan with no tbname } SScanLogicNode; typedef struct SJoinLogicNode { diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index a2de0bc63a..4dfc55c0fa 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -660,6 +660,7 @@ static const char* jkScanLogicPlanDynamicScanFuncs = "DynamicScanFuncs"; static const char* jkScanLogicPlanDataRequired = "DataRequired"; static const char* jkScanLogicPlanTagCond = "TagCond"; static const char* jkScanLogicPlanGroupTags = 
"GroupTags"; +static const char* jkScanLogicPlanOnlyMetaCtbIdx = "OnlyMetaCtbIdx"; static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) { const SScanLogicNode* pNode = (const SScanLogicNode*)pObj; @@ -701,7 +702,9 @@ static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = nodeListToJson(pJson, jkScanLogicPlanGroupTags, pNode->pGroupTags); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkScanLogicPlanOnlyMetaCtbIdx, pNode->onlyMetaCtbIdx); + } return code; } @@ -746,7 +749,10 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeList(pJson, jkScanLogicPlanGroupTags, &pNode->pGroupTags); } - + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkScanLogicPlanOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); + } + return code; } diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 16440be511..6944fc9f18 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -2679,6 +2679,9 @@ static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubp } nodesDestroyNode((SNode*)pAgg); tagScanOptCloneAncestorSlimit((SLogicNode*)pScanNode); + + pScanNode->onlyMetaCtbIdx = false; + pCxt->optimized = true; return TSDB_CODE_SUCCESS; } diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 8efa9c1048..5f78b5de9c 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -520,7 +520,7 @@ static int32_t createTagScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubpla } vgroupInfoToNodeAddr(pScanLogicNode->pVgroupList->vgroups, &pSubplan->execNode); - pScan->onlyMetaCtbIdx = false; + pScan->onlyMetaCtbIdx = pScanLogicNode->onlyMetaCtbIdx; return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, 
(SScanPhysiNode*)pScan, pPhyNode); } From 1a08ffc79f3a8f58c75b60cde50111bc8722ed42 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 14 Aug 2023 19:27:09 +0800 Subject: [PATCH 111/147] fix(stream): check error. --- source/libs/stream/src/streamDispatch.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 557b92baf9..8de034d1ca 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -730,10 +730,14 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i // flag. here we need to retry dispatch this message to downstream task immediately. handle the case the failure // happened too fast. // todo handle the shuffle dispatch failure - qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, retry cnt:%d", id, pRsp->downstreamTaskId, - tstrerror(code), ++pTask->msgInfo.retryCount); - int32_t ret = doDispatchAllBlocks(pTask, pTask->msgInfo.pData); - if (ret != TSDB_CODE_SUCCESS) { + if (code == TSDB_CODE_STREAM_TASK_NOT_EXIST) { // destination task does not exist, not retry anymore + qWarn("s-task:%s failed to dispatch msg to task:0x%x, no retry, since it is destroyed already", id); + } else { + qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, retry cnt:%d", id, pRsp->downstreamTaskId, + tstrerror(code), ++pTask->msgInfo.retryCount); + int32_t ret = doDispatchAllBlocks(pTask, pTask->msgInfo.pData); + if (ret != TSDB_CODE_SUCCESS) { + } } return TSDB_CODE_SUCCESS; From a334547726a5a85a8092119773dd2f5e24a5eeab Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 14 Aug 2023 19:28:36 +0800 Subject: [PATCH 112/147] fix(stream): fix an syntax error. 
--- source/libs/stream/src/streamDispatch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 8de034d1ca..12ff064963 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -731,7 +731,7 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i // happened too fast. // todo handle the shuffle dispatch failure if (code == TSDB_CODE_STREAM_TASK_NOT_EXIST) { // destination task does not exist, not retry anymore - qWarn("s-task:%s failed to dispatch msg to task:0x%x, no retry, since it is destroyed already", id); + qWarn("s-task:%s failed to dispatch msg to task:0x%x, no retry, since it is destroyed already", id, pRsp->downstreamTaskId); } else { qError("s-task:%s failed to dispatch msg to task:0x%x, code:%s, retry cnt:%d", id, pRsp->downstreamTaskId, tstrerror(code), ++pTask->msgInfo.retryCount); From a48d137d32c039fb68764abde491eb221e992e75 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 14 Aug 2023 11:56:47 +0000 Subject: [PATCH 113/147] rebuild index at tag0 --- source/dnode/mnode/impl/src/mndStb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 903181282a..cfac5d0a61 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -870,7 +870,7 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea sprintf(fullIdxName, "%s.%s_default", pDb->name, pSchema->name); SSIdx idx = {0}; - if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0) { + if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0 && idx.pIdx != NULL) { terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; mndReleaseIdx(pMnode, idx.pIdx); goto _OVER; From 3067417ea31eacf21f94fe57c6691f5c2a60d4d8 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 14 Aug 
2023 12:01:17 +0000 Subject: [PATCH 114/147] rebuild index at tag0 --- source/dnode/mnode/impl/src/mndStb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index cfac5d0a61..03b05f8c82 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -873,6 +873,7 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0 && idx.pIdx != NULL) { terrno = TSDB_CODE_MND_TAG_INDEX_ALREADY_EXIST; mndReleaseIdx(pMnode, idx.pIdx); + goto _OVER; } From 1447d1d55c036ac5f050e3072e01d06d33f5200d Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 15 Aug 2023 00:40:12 +0000 Subject: [PATCH 115/147] rebuild index at tag0 --- include/os/osRand.h | 2 ++ source/dnode/mnode/impl/src/mndStb.c | 4 +++- source/os/src/osRand.c | 17 +++++++++++++---- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/include/os/osRand.h b/include/os/osRand.h index 27d07e8c6f..5d907bba15 100644 --- a/include/os/osRand.h +++ b/include/os/osRand.h @@ -32,6 +32,8 @@ void taosSeedRand(uint32_t seed); uint32_t taosRand(void); uint32_t taosRandR(uint32_t* pSeed); void taosRandStr(char* str, int32_t size); +void taosRandStr2(char* str, int32_t size); + uint32_t taosSafeRand(void); #ifdef __cplusplus diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index 03b05f8c82..ccfad404d8 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -866,8 +866,10 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea mInfo("trans:%d, used to create stb:%s", pTrans->id, pCreate->name); if (mndBuildStbFromReq(pMnode, &stbObj, pCreate, pDb) != 0) goto _OVER; + char randStr[16] = {0}; + taosRandStr2(randStr, tListLen(randStr) - 1); SSchema *pSchema = &(stbObj.pTags[0]); - sprintf(fullIdxName, "%s.%s_default", pDb->name, 
pSchema->name); + sprintf(fullIdxName, "%s.%s_%s", pDb->name, pSchema->name, randStr); SSIdx idx = {0}; if (mndAcquireGlobalIdx(pMnode, fullIdxName, SDB_IDX, &idx) == 0 && idx.pIdx != NULL) { diff --git a/source/os/src/osRand.c b/source/os/src/osRand.c index 83c36a422d..9cb6f6e52a 100644 --- a/source/os/src/osRand.c +++ b/source/os/src/osRand.c @@ -27,11 +27,11 @@ void taosSeedRand(uint32_t seed) { return srand(seed); } uint32_t taosRand(void) { #ifdef WINDOWS - unsigned int pSeed; - rand_s(&pSeed); - return pSeed; + unsigned int pSeed; + rand_s(&pSeed); + return pSeed; #else - return rand(); + return rand(); #endif } @@ -80,6 +80,15 @@ void taosRandStr(char* str, int32_t size) { const char* set = "abcdefghijklmnopqrstuvwxyz0123456789-_."; int32_t len = 39; + for (int32_t i = 0; i < size; ++i) { + str[i] = set[taosRand() % len]; + } +} + +void taosRandStr2(char* str, int32_t size) { + const char* set = "abcdefghijklmnopqrstuvwxyz0123456789"; + int32_t len = 36; + for (int32_t i = 0; i < size; ++i) { str[i] = set[taosRand() % len]; } From ef7f762c62b6bb43272a189ca16225738209f872 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 15 Aug 2023 00:41:48 +0000 Subject: [PATCH 116/147] rebuild index at tag0 --- source/dnode/mnode/impl/src/mndStb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index ccfad404d8..f80e721324 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -866,7 +866,7 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea mInfo("trans:%d, used to create stb:%s", pTrans->id, pCreate->name); if (mndBuildStbFromReq(pMnode, &stbObj, pCreate, pDb) != 0) goto _OVER; - char randStr[16] = {0}; + char randStr[24] = {0}; taosRandStr2(randStr, tListLen(randStr) - 1); SSchema *pSchema = &(stbObj.pTags[0]); sprintf(fullIdxName, "%s.%s_%s", pDb->name, pSchema->name, randStr); From 
9ec64b9201a34223eaec8e46f91e9c8c00283d32 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 15 Aug 2023 02:01:13 +0000 Subject: [PATCH 117/147] rebuild index at tag0 --- source/dnode/mnode/impl/src/mndStb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index f80e721324..c6dce0d578 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -879,7 +879,7 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea goto _OVER; } - SIdxObj idxObj; + SIdxObj idxObj = {0}; memcpy(idxObj.name, fullIdxName, TSDB_INDEX_FNAME_LEN); memcpy(idxObj.stb, stbObj.name, TSDB_TABLE_FNAME_LEN); memcpy(idxObj.db, stbObj.db, TSDB_DB_FNAME_LEN); From 271ecf6beff4dcea8e83b6945406a3050dd2d793 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 15 Aug 2023 10:41:01 +0800 Subject: [PATCH 118/147] update tag index case --- tests/system-test/0-others/show_tag_index.py | 20 +++++++++++-------- tests/system-test/0-others/tag_index_basic.py | 7 +++++-- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/tests/system-test/0-others/show_tag_index.py b/tests/system-test/0-others/show_tag_index.py index 663426b7ff..d39f9eaab9 100644 --- a/tests/system-test/0-others/show_tag_index.py +++ b/tests/system-test/0-others/show_tag_index.py @@ -59,14 +59,18 @@ class TDTestCase: tdSql.checkData(1, 2, 2) def check_indexes(self): - tdSql.checkRows(1) - tdSql.checkCols(7) - tdSql.checkData(0, 0, 'idx1') - tdSql.checkData(0, 1, 'db') - tdSql.checkData(0, 2, 'stb') - tdSql.checkData(0, 3, None) - tdSql.checkData(0, 5, 't1') - tdSql.checkData(0, 6, 'tag_index') + tdSql.checkRows(2) + for i in range(2): + col_name = tdSql.getData(i, 5) + if col_name == "t0": + continue + tdSql.checkCols(7) + tdSql.checkData(i, 0, 'idx1') + tdSql.checkData(i, 1, 'db') + tdSql.checkData(i, 2, 'stb') + tdSql.checkData(i, 3, None) + tdSql.checkData(i, 5, 't1') + 
tdSql.checkData(i, 6, 'tag_index') def run(self): tdSql.execute(f'create database db') diff --git a/tests/system-test/0-others/tag_index_basic.py b/tests/system-test/0-others/tag_index_basic.py index 72ed559ffd..c1e1d521d2 100644 --- a/tests/system-test/0-others/tag_index_basic.py +++ b/tests/system-test/0-others/tag_index_basic.py @@ -118,12 +118,15 @@ class TDTestCase: def show_tagidx(self, stbname): sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="db"' tdSql.query(sql) - rows = len(self.tag_dict.keys())-1 + rows = len(self.tag_dict.keys()) tdSql.checkRows(rows) for i in range(rows): col_name = tdSql.getData(i, 1) idx_name = f'idx_{col_name}' + # skip first tag + if col_name == "t1": + continue tdSql.checkData(i, 0, idx_name) tdLog.info(f' show {rows} tag indexs ok.') @@ -201,7 +204,7 @@ class TDTestCase: # check idx result is 0 sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="db"' tdSql.query(sql) - tdSql.checkRows(0) + tdSql.checkRows(1) tdLog.info(f' drop {cnt} tag indexs ok.') # create long name idx From 0acb00c5165de1b2031870b02a3085829bb9a07a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 15 Aug 2023 11:48:56 +0800 Subject: [PATCH 119/147] fix(stream): add log. 
--- source/libs/stream/src/stream.c | 2 +- source/libs/stream/src/streamDispatch.c | 3 ++- source/libs/stream/src/streamExec.c | 12 +++--------- 3 files changed, 6 insertions(+), 11 deletions(-) diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c index e9b38dfff2..af67490888 100644 --- a/source/libs/stream/src/stream.c +++ b/source/libs/stream/src/stream.c @@ -378,7 +378,7 @@ int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem) { } } else if (type == STREAM_INPUT__CHECKPOINT || type == STREAM_INPUT__TRANS_STATE) { taosWriteQitem(pTask->inputQueue->queue, pItem); - qDebug("s-task:%s trans-state blockdata enqueue, total in queue:%d, size:%.2fMiB", pTask->id.idStr, total, size); + qDebug("s-task:%s checkpoint/trans-state blockdata enqueue, total in queue:%d, size:%.2fMiB", pTask->id.idStr, total, size); } else if (type == STREAM_INPUT__GET_RES) { // use the default memory limit, refactor later. taosWriteQitem(pTask->inputQueue->queue, pItem); diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 12ff064963..bb32173404 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -364,7 +364,8 @@ static int32_t doSendDispatchMsg(SStreamTask* pTask, const SStreamDispatchReq* p msg.pCont = buf; msg.msgType = pTask->msgInfo.msgType; - qDebug("s-task:%s dispatch msg to taskId:0x%x vgId:%d data msg", pTask->id.idStr, pReq->taskId, vgId); + qDebug("s-task:%s dispatch msg to taskId:0x%x vgId:%d data msg, len:%d", pTask->id.idStr, pReq->taskId, vgId, + msg.contLen); return tmsgSendReq(pEpSet, &msg); FAIL: diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index b513d7c13e..6e6f23be01 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -421,17 +421,12 @@ static int32_t extractBlocksFromInputQ(SStreamTask* pTask, SStreamQueueItem** pI SStreamQueueItem* qItem = 
streamQueueNextItem(pTask->inputQueue); if (qItem == NULL) { - if (pTask->info.taskLevel == TASK_LEVEL__SOURCE && (++retryTimes) < MAX_RETRY_TIMES) { - taosMsleep(10); - qDebug("===stream===try again batchSize:%d, retry:%d, %s", *numOfBlocks, retryTimes, id); - continue; - } - qDebug("===stream===break batchSize:%d, %s", *numOfBlocks, id); return TSDB_CODE_SUCCESS; } - qDebug("s-task:%s sink task handle result block one-by-one", id); + qDebug("s-task:%s sink task handle block one-by-one, type:%d", id, qItem->type); + *numOfBlocks = 1; *pInput = qItem; return TSDB_CODE_SUCCESS; @@ -467,8 +462,7 @@ static int32_t extractBlocksFromInputQ(SStreamTask* pTask, SStreamQueueItem** pI return TSDB_CODE_SUCCESS; } else { // previous existed blocks needs to be handle, before handle the checkpoint msg block - qDebug("s-task:%s checkpoint/transtate msg extracted, handle previous block first, numOfBlocks:%d", id, - *numOfBlocks); + qDebug("s-task:%s checkpoint/transtate msg extracted, handle previous blocks, numOfBlocks:%d", id, *numOfBlocks); streamQueueProcessFail(pTask->inputQueue); return TSDB_CODE_SUCCESS; } From 01a7dfbc341dc6a41b0ecd8b3ec6150e51d6fd08 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 15 Aug 2023 12:59:30 +0800 Subject: [PATCH 120/147] feat: add irate distributed execution --- include/libs/function/functionMgt.h | 2 + source/libs/function/inc/builtinsimpl.h | 3 + source/libs/function/src/builtins.c | 63 +++++++++++++++++++ source/libs/function/src/builtinsimpl.c | 81 +++++++++++++++++++++++++ 4 files changed, 149 insertions(+) diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h index 402b8f0309..eebb69e6ba 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -157,6 +157,8 @@ typedef enum EFunctionType { FUNCTION_TYPE_AVG_MERGE, FUNCTION_TYPE_STDDEV_PARTIAL, FUNCTION_TYPE_STDDEV_MERGE, + FUNCTION_TYPE_IRATE_PARTIAL, + FUNCTION_TYPE_IRATE_MERGE, // geometry functions 
FUNCTION_TYPE_GEOM_FROM_TEXT = 4250, diff --git a/source/libs/function/inc/builtinsimpl.h b/source/libs/function/inc/builtinsimpl.h index c3afc30a7b..d2f19ed2eb 100644 --- a/source/libs/function/inc/builtinsimpl.h +++ b/source/libs/function/inc/builtinsimpl.h @@ -127,7 +127,10 @@ int32_t derivativeFunction(SqlFunctionCtx* pCtx); bool getIrateFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); bool irateFuncSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo); int32_t irateFunction(SqlFunctionCtx* pCtx); +int32_t irateFunctionMerge(SqlFunctionCtx* pCtx); int32_t irateFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +int32_t iratePartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock); +int32_t getIrateInfoSize(); int32_t cachedLastRowFunction(SqlFunctionCtx* pCtx); diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index cee4000155..ef7c9d1442 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1567,6 +1567,43 @@ static int32_t translateIrate(SFunctionNode* pFunc, char* pErrBuf, int32_t len) return TSDB_CODE_SUCCESS; } +static int32_t translateIrateImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isPartial) { + if (3 != LIST_LENGTH(pFunc->pParameterList)) { + return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); + } + + uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; + if (isPartial) { + if (!IS_NUMERIC_TYPE(colType)) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + pFunc->node.resType = (SDataType){.bytes = getIrateInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; + } else { + if (TSDB_DATA_TYPE_BINARY != colType) { + return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); + } + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_DOUBLE].bytes, .type = TSDB_DATA_TYPE_DOUBLE}; + + // add database precision as param + uint8_t 
dbPrec = pFunc->node.resType.precision; + int32_t code = addDbPrecisonParam(&pFunc->pParameterList, dbPrec); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } + + + return TSDB_CODE_SUCCESS; +} + +static int32_t translateIratePartial(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + return translateIrateImpl(pFunc, pErrBuf, len, true); +} + +static int32_t translateIrateMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { + return translateIrateImpl(pFunc, pErrBuf, len, false); +} + static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList); uint8_t dbPrec = pFunc->node.resType.precision; @@ -2604,6 +2641,32 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .initFunc = irateFuncSetup, .processFunc = irateFunction, .sprocessFunc = irateScalarFunction, + .finalizeFunc = irateFinalize, + .pPartialFunc = "_irate_partial", + .pMergeFunc = "_irate_merge" + }, + { + .name = "_irate_partial", + .type = FUNCTION_TYPE_IRATE_PARTIAL, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | + FUNC_MGT_FORBID_SYSTABLE_FUNC, + .translateFunc = translateIratePartial, + .getEnvFunc = getIrateFuncEnv, + .initFunc = irateFuncSetup, + .processFunc = irateFunction, + .sprocessFunc = irateScalarFunction, + .finalizeFunc = iratePartialFinalize + }, + { + .name = "_irate_merge", + .type = FUNCTION_TYPE_IRATE_MERGE, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | + FUNC_MGT_FORBID_SYSTABLE_FUNC, + .translateFunc = translateIrateMerge, + .getEnvFunc = getIrateFuncEnv, + .initFunc = irateFuncSetup, + .processFunc = irateFunctionMerge, + .sprocessFunc = irateScalarFunction, .finalizeFunc = irateFinalize }, { diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index fad8c9ca5b..631f943915 100644 --- 
a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -5768,6 +5768,8 @@ int32_t derivativeFunction(SqlFunctionCtx* pCtx) { return TSDB_CODE_SUCCESS; } +int32_t getIrateInfoSize() { return (int32_t)sizeof(SRateInfo); } + bool getIrateFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv) { pEnv->calcMemSize = sizeof(SRateInfo); return true; @@ -5868,6 +5870,85 @@ static double doCalcRate(const SRateInfo* pRateInfo, double tickPerSec) { return (duration > 0) ? ((double)diff) / (duration / tickPerSec) : 0.0; } +static void irateTransferInfoImpl(TSKEY inputKey, SRateInfo* pInput, SRateInfo* pOutput) { + if (inputKey > pOutput->lastKey) { + pOutput->firstKey = pOutput->lastKey; + pOutput->lastKey = pInput->firstKey; + + pOutput->firstValue = pOutput->lastValue; + pOutput->lastValue = pInput->firstValue; + } else if ((inputKey < pOutput->lastKey) && (inputKey > pOutput->firstKey)) { + pOutput->firstKey = pOutput->lastKey; + pOutput->firstValue = pOutput->lastValue; + } else { + // inputKey < pOutput->firstKey + } +} + +static int32_t irateTransferInfo(SRateInfo* pInput, SRateInfo* pOutput) { + pOutput->hasResult = pInput->hasResult; + if (pInput->firstKey == pOutput->firstKey || pInput->firstKey == pOutput->lastKey || + pInput->lastKey == pOutput->firstKey || pInput->lastKey == pOutput->lastKey) { + return TSDB_CODE_FUNC_DUP_TIMESTAMP; + } + + if (pInput->firstKey != INT64_MIN) { + irateTransferInfoImpl(pInput->firstKey, pInput, pOutput); + } + + if (pInput->lastKey != INT64_MIN) { + irateTransferInfoImpl(pInput->lastKey, pInput, pOutput); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t irateFunctionMerge(SqlFunctionCtx* pCtx) { + SInputColumnInfoData* pInput = &pCtx->input; + SColumnInfoData* pCol = pInput->pData[0]; + if (pCol->info.type != TSDB_DATA_TYPE_BINARY) { + return TSDB_CODE_FUNC_FUNTION_PARA_TYPE; + } + + SRateInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + + int32_t start = pInput->startRowIndex; + for 
(int32_t i = start; i < start + pInput->numOfRows; ++i) { + char* data = colDataGetData(pCol, i); + SRateInfo* pInputInfo = (SRateInfo*)varDataVal(data); + if (pInputInfo->hasResult) { + int32_t code = irateTransferInfo(pInputInfo, pInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } + } + + if (pInfo->hasResult) { + GET_RES_INFO(pCtx)->numOfRes = 1; + } + + return TSDB_CODE_SUCCESS; +} + +int32_t iratePartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { + SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx); + SRateInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + int32_t resultBytes = getIrateInfoSize(); + char* res = taosMemoryCalloc(resultBytes + VARSTR_HEADER_SIZE, sizeof(char)); + + memcpy(varDataVal(res), pInfo, resultBytes); + varDataSetLen(res, resultBytes); + + int32_t slotId = pCtx->pExpr->base.resSchema.slotId; + SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); + + colDataSetVal(pCol, pBlock->info.rows, res, false); + + taosMemoryFree(res); + return pResInfo->numOfRes; +} + int32_t irateFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { int32_t slotId = pCtx->pExpr->base.resSchema.slotId; SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId); From f7a5bef17dbd3d70365528370973618b31c2439e Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 15 Aug 2023 13:02:32 +0800 Subject: [PATCH 121/147] enh: disable stream/udf on windows --- include/util/taoserror.h | 1 + source/dnode/mgmt/mgmt_dnode/src/dmInt.c | 2 ++ source/dnode/mgmt/mgmt_qnode/src/qmInt.c | 2 ++ source/dnode/mgmt/mgmt_snode/src/smInt.c | 2 ++ source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 2 ++ source/dnode/mgmt/node_mgmt/src/dmEnv.c | 4 +++- source/libs/parser/src/parTranslater.c | 13 +++++++++++++ source/util/src/terror.c | 1 + 8 files changed, 26 insertions(+), 1 deletion(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index b43985074c..75ab916230 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ 
-707,6 +707,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_PAR_INVALID_OPTR_USAGE TAOS_DEF_ERROR_CODE(0, 0x2667) #define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668) #define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED TAOS_DEF_ERROR_CODE(0, 0x2669) +#define TSDB_CODE_PAR_INVALID_PLATFORM TAOS_DEF_ERROR_CODE(0, 0x2670) #define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF) //planner diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c index 09783a5ea9..f59d04e618 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c @@ -59,9 +59,11 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { return -1; } +#ifdef WINDOWS if (udfStartUdfd(pMgmt->pData->dnodeId) != 0) { dError("failed to start udfd"); } +#endif pOutput->pMgmt = pMgmt; return 0; diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmInt.c b/source/dnode/mgmt/mgmt_qnode/src/qmInt.c index 3b425a0b49..82bc2f36f0 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmInt.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmInt.c @@ -57,11 +57,13 @@ static int32_t qmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("qnode-impl", "initialized"); +#ifdef WINDOWS if (udfcOpen() != 0) { dError("qnode can not open udfc"); qmClose(pMgmt); return -1; } +#endif if (qmStartWorker(pMgmt) != 0) { dError("failed to start qnode worker since %s", terrstr()); diff --git a/source/dnode/mgmt/mgmt_snode/src/smInt.c b/source/dnode/mgmt/mgmt_snode/src/smInt.c index e222349767..7607fcac61 100644 --- a/source/dnode/mgmt/mgmt_snode/src/smInt.c +++ b/source/dnode/mgmt/mgmt_snode/src/smInt.c @@ -65,11 +65,13 @@ int32_t smOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("snode-worker", "initialized"); +#ifdef WINDOWS if (udfcOpen() != 0) { dError("failed to open udfc in snode"); smClose(pMgmt); return -1; } +#endif pOutput->pMgmt = pMgmt; return 0; diff --git 
a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 0ff2537e4c..872577cf28 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -571,10 +571,12 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("vnode-vnodes", "initialized"); +#ifdef WINDOWS if (udfcOpen() != 0) { dError("failed to open udfc in vnode"); goto _OVER; } +#endif code = 0; diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index a34002161d..a8f871dc96 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -198,8 +198,10 @@ void dmCleanup() { monCleanup(); syncCleanUp(); walCleanUp(); - udfcClose(); +#ifdef WINDOWS + udfcClose(); udfStopUdfd(); +#endif taosStopCacheRefreshWorker(); dmDiskClose(); dInfo("dnode env is cleaned up"); diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index a41447edf3..9c3beea2d8 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -4418,6 +4418,10 @@ static int32_t checkDbRetentionsOption(STranslateContext* pCxt, SNodeList* pRete return TSDB_CODE_SUCCESS; } +#ifdef WINDOWS + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); +#endif + if (LIST_LENGTH(pRetentions) > 3) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION, "Invalid option retentions"); } @@ -5867,6 +5871,9 @@ static int32_t checkCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pS } static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) { +#ifdef WINDOWS + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); +#endif int32_t code = checkCreateSmaIndex(pCxt, pStmt); pStmt->pReq = taosMemoryCalloc(1, 
sizeof(SMCreateSmaReq)); if (pStmt->pReq == NULL) code = TSDB_CODE_OUT_OF_MEMORY; @@ -7052,6 +7059,9 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* } static int32_t translateCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pStmt) { +#ifdef WINDOWS + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); +#endif SCMCreateStreamReq createReq = {0}; int32_t code = checkCreateStream(pCxt, pStmt); @@ -7201,6 +7211,9 @@ static int32_t readFromFile(char* pName, int32_t* len, char** buf) { } static int32_t translateCreateFunction(STranslateContext* pCxt, SCreateFunctionStmt* pStmt) { +#ifdef WINDOWS + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); +#endif if (fmIsBuiltinFunc(pStmt->funcName)) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_FUNCTION_NAME); } diff --git a/source/util/src/terror.c b/source/util/src/terror.c index b0b407e2a5..74352f2799 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -570,6 +570,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_GET_META_ERROR, "Fail to get table i TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, "Not unique table/alias") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, "System table not allowed") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, "System table not allowed") +TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platformXX") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error") //planner From 71c3c710e6db673658a5ead9b455166e28a8a5ab Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 15 Aug 2023 14:23:20 +0800 Subject: [PATCH 122/147] fix(stream): continue process when met with trans-state msg. 
--- source/libs/stream/src/streamExec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 6e6f23be01..02bbce6485 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -575,7 +575,7 @@ int32_t streamExecForAll(SStreamTask* pTask) { if (pInput->type == STREAM_INPUT__TRANS_STATE) { streamProcessTranstateBlock(pTask, (SStreamDataBlock*)pInput); - return 0; + continue; } if (pTask->info.taskLevel == TASK_LEVEL__SINK) { From a576a3b972ff69adde0b88907be7f37afadc7201 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 15 Aug 2023 06:23:27 +0000 Subject: [PATCH 123/147] rebuild index at tag0 --- tests/script/tsim/sma/drop_sma.sim | 65 +++++++++++++++++++++--------- 1 file changed, 47 insertions(+), 18 deletions(-) diff --git a/tests/script/tsim/sma/drop_sma.sim b/tests/script/tsim/sma/drop_sma.sim index 8fd8ebdcfd..fcf48f2b36 100644 --- a/tests/script/tsim/sma/drop_sma.sim +++ b/tests/script/tsim/sma/drop_sma.sim @@ -52,19 +52,35 @@ sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) in print --> show sma sql show indexes from stb from d1; -if $rows != 1 then +if $rows != 2 then return -1 endi -if $data[0][0] != sma_index_name1 then - return -1 -endi -if $data[0][1] != d1 then - return -1 -endi -if $data[0][2] != stb then - return -1 + +if $data[0][6] == tag_index then + if $data[1][0] != sma_index_name1 then + return -1 + endi + if $data[1][1] != d1 then + return -1 + endi + if $data[1][2] != stb then + return -1 + endi +else + if $data[0][0] != sma_index_name1 then + return -1 + endi + if $data[0][1] != d1 then + return -1 + endi + if $data[0][2] != stb then + return -1 + endi endi + + + print --> drop stb sql drop table stb; @@ -78,17 +94,30 @@ sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) in print --> show sma sql show indexes from stb from d1; -if $rows != 1 then +if $rows != 2 
then return -1 endi -if $data[0][0] != sma_index_name1 then - return -1 -endi -if $data[0][1] != d1 then - return -1 -endi -if $data[0][2] != stb then - return -1 + +if $data[0][6] == tag_index then + if $data[1][0] != sma_index_name1 then + return -1 + endi + if $data[1][1] != d1 then + return -1 + endi + if $data[1][2] != stb then + return -1 + endi +else + if $data[0][0] != sma_index_name1 then + return -1 + endi + if $data[0][1] != d1 then + return -1 + endi + if $data[0][2] != stb then + return -1 + endi endi print --> drop stb From 27db6cfd676bc238a640adfb5471c8b6df954c8c Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 15 Aug 2023 14:25:31 +0800 Subject: [PATCH 124/147] enh: disable udf/stream for mnd on windows --- source/dnode/mnode/impl/src/mndDb.c | 6 ++++++ source/dnode/mnode/impl/src/mndFunc.c | 4 ++++ source/dnode/mnode/impl/src/mndSma.c | 4 ++++ source/dnode/mnode/impl/src/mndStream.c | 4 ++++ 4 files changed, 18 insertions(+) diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 1bd629e56f..fdefe9e5b1 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -668,6 +668,12 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { } mInfo("db:%s, start to create, vgroups:%d", createReq.db, createReq.numOfVgroups); +#ifdef WINDOWS + if (taosArrayGetSize(createReq.pRetensions) > 0) { + code = TSDB_CODE_PAR_INVALID_PLATFORM; + goto _OVER; + } +#endif if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_DB, NULL) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c index 4ffc7a20c2..5f4ac830cd 100644 --- a/source/dnode/mnode/impl/src/mndFunc.c +++ b/source/dnode/mnode/impl/src/mndFunc.c @@ -361,6 +361,10 @@ static int32_t mndProcessCreateFuncReq(SRpcMsg *pReq) { } mInfo("func:%s, start to create, size:%d", createReq.name, createReq.codeLen); +#ifdef WINDOWS + code = TSDB_CODE_PAR_INVALID_PLATFORM; + goto 
_OVER; +#endif if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_FUNC) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index b84297f6bf..ff3f66efaf 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -655,6 +655,10 @@ _OVER: } static int32_t mndCheckCreateSmaReq(SMCreateSmaReq *pCreate) { +#ifdef WINDOWS + terrno = TSDB_CODE_PAR_INVALID_PLATFORM; + return -1; +#endif terrno = TSDB_CODE_MND_INVALID_SMA_OPTION; if (pCreate->name[0] == 0) return -1; if (pCreate->stb[0] == 0) return -1; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index a0d53ec780..d6bb8c167f 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -253,6 +253,10 @@ static void mndShowStreamTrigger(char *dst, SStreamObj *pStream) { } static int32_t mndCheckCreateStreamReq(SCMCreateStreamReq *pCreate) { +#ifdef WINDOWS + terrno = TSDB_CODE_PAR_INVALID_PLATFORM; + return -1; +#endif if (pCreate->name[0] == 0 || pCreate->sql == NULL || pCreate->sql[0] == 0 || pCreate->sourceDB[0] == 0 || pCreate->targetStbFullName[0] == 0) { terrno = TSDB_CODE_MND_INVALID_STREAM_OPTION; From 8639c22cc0cd5e684710fe4dfda644a6e0362e54 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Aug 2023 14:53:58 +0800 Subject: [PATCH 125/147] vnode, common: USE_COS def --- source/common/CMakeLists.txt | 8 ++++++++ source/dnode/vnode/CMakeLists.txt | 4 ++++ 2 files changed, 12 insertions(+) diff --git a/source/common/CMakeLists.txt b/source/common/CMakeLists.txt index 356ea2be1c..b010467f20 100644 --- a/source/common/CMakeLists.txt +++ b/source/common/CMakeLists.txt @@ -16,6 +16,14 @@ ENDIF () IF (TD_STORAGE) ADD_DEFINITIONS(-D_STORAGE) TARGET_LINK_LIBRARIES(common PRIVATE storage) + + IF(${TD_LINUX}) + IF(${BUILD_WITH_COS}) + add_definitions(-DUSE_COS) + ENDIF(${BUILD_WITH_COS}) + + ENDIF(${TD_LINUX}) + ENDIF 
() target_include_directories( diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 052b6be37f..c70df86e20 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -189,6 +189,10 @@ target_include_directories( PUBLIC "$ENV{HOME}/.cos-local.1/include" ) +if(${BUILD_WITH_COS}) + add_definitions(-DUSE_COS) +endif(${BUILD_WITH_COS}) + endif(${TD_LINUX}) IF (TD_GRANT) From bd758e0269786711de840303a921b93b4e097d2c Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Aug 2023 15:14:34 +0800 Subject: [PATCH 126/147] retention: remove old files last --- source/dnode/vnode/src/inc/vndCos.h | 2 +- source/dnode/vnode/src/tsdb/tsdbRetention.c | 23 +++++++++++---------- source/dnode/vnode/src/vnd/vnodeCos.c | 11 ++++++++-- 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/source/dnode/vnode/src/inc/vndCos.h b/source/dnode/vnode/src/inc/vndCos.h index f6db7f096e..cf2c5eb441 100644 --- a/source/dnode/vnode/src/inc/vndCos.h +++ b/source/dnode/vnode/src/inc/vndCos.h @@ -26,7 +26,7 @@ extern int8_t tsS3Enabled; int32_t s3Init(); void s3CleanUp(); -void s3PutObjectFromFile(const char *file, const char *object); +int32_t s3PutObjectFromFile(const char *file, const char *object); void s3DeleteObjects(const char *object_name[], int nobject); bool s3Exists(const char *object_name); bool s3Get(const char *object_name, const char *path); diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c index ebe20c0e85..46a5d19a1a 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRetention.c +++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c @@ -114,7 +114,8 @@ static int32_t tsdbCopyFileS3(SRTNer *rtner, const STFileObj *from, const STFile TSDB_CHECK_CODE(code, lino, _exit); char *object_name = taosDirEntryBaseName(fname); - s3PutObjectFromFile(from->fname, object_name); + code = s3PutObjectFromFile(from->fname, object_name); + TSDB_CHECK_CODE(code, lino, _exit); 
taosCloseFile(&fdFrom); @@ -178,16 +179,6 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const int32_t lino = 0; STFileOp op = {0}; - // remove old - op = (STFileOp){ - .optype = TSDB_FOP_REMOVE, - .fid = fobj->f->fid, - .of = fobj->f[0], - }; - - code = TARRAY2_APPEND(rtner->fopArr, op); - TSDB_CHECK_CODE(code, lino, _exit); - // create new op = (STFileOp){ .optype = TSDB_FOP_CREATE, @@ -213,6 +204,16 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const code = tsdbCopyFileS3(rtner, fobj, &op.nf); TSDB_CHECK_CODE(code, lino, _exit); + // remove old + op = (STFileOp){ + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + _exit: if (code) { TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index b28b7ad747..02021831bf 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -51,7 +51,8 @@ static void s3InitRequestOptions(cos_request_options_t *options, int is_cname) { options->ctl = cos_http_controller_create(options->pool, 0); } -void s3PutObjectFromFile(const char *file_str, const char *object_str) { +int32_t s3PutObjectFromFile(const char *file_str, const char *object_str) { + int32_t code = 0; cos_pool_t *p = NULL; int is_cname = 0; cos_status_t *s = NULL; @@ -76,6 +77,12 @@ void s3PutObjectFromFile(const char *file_str, const char *object_str) { log_status(s); cos_pool_destroy(p); + + if (s->code != 200) { + return code = s->code; + } + + return code; } void s3DeleteObjects(const char *object_name[], int nobject) { @@ -300,7 +307,7 @@ long s3Size(const char *object_name) { int32_t s3Init() { return 0; } void s3CleanUp() {} -void s3PutObjectFromFile(const char *file, const char *object) {} +int32_t s3PutObjectFromFile(const char *file, const char *object) { 
return 0; } void s3DeleteObjects(const char *object_name[], int nobject) {} bool s3Exists(const char *object_name) { return false; } bool s3Get(const char *object_name, const char *path) { return false; } From 0f0d0953cdbed4eaeed80e114cf2de6dccbd9290 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 15 Aug 2023 07:26:53 +0000 Subject: [PATCH 127/147] rebuild index at tag0 --- source/dnode/mnode/impl/src/mndIndex.c | 4 +- tests/parallel_test/cases.task | 3 ++ tests/script/tsim/tagindex/add_index.sim | 48 +++++++++++++++++-- .../tsim/tagindex/sma_and_tag_index.sim | 22 +++++---- 4 files changed, 63 insertions(+), 14 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndIndex.c b/source/dnode/mnode/impl/src/mndIndex.c index b56ea320cc..2e78116a86 100644 --- a/source/dnode/mnode/impl/src/mndIndex.c +++ b/source/dnode/mnode/impl/src/mndIndex.c @@ -696,8 +696,8 @@ int8_t mndCheckIndexNameByTagName(SMnode *pMnode, SIdxObj *pIdxObj) { continue; } if (strncmp(pIdxObj->colName, pIdx->colName, TSDB_COL_NAME_LEN) == 0) { + sdbCancelFetch(pSdb, pIter); sdbRelease(pSdb, pIdx); - sdbCancelFetch(pSdb, pIdx); exist = 1; break; } @@ -854,8 +854,8 @@ int32_t mndDropIdxsByStb(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *p if (pIdx->stbUid == pStb->uid) { if (mndSetDropIdxCommitLogs(pMnode, pTrans, pIdx) != 0) { + sdbCancelFetch(pSdb, pIter); sdbRelease(pSdb, pIdx); - sdbCancelFetch(pSdb, pIdx); return -1; } } diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index a946a7feaf..e81339d705 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1204,6 +1204,9 @@ ,,y,script,./test.sh -f tsim/tag/drop_tag.sim ,,y,script,./test.sh -f tsim/tag/tbNameIn.sim ,,y,script,./test.sh -f tmp/monitor.sim +,,y,script,./test.sh -f tsim/tagindex/add_index.sim +,,y,script,./test.sh -f tsim/tagindex/sma_and_tag_index.sim + #develop test ,,n,develop-test,python3 ./test.py -f 2-query/table_count_scan.py diff --git 
a/tests/script/tsim/tagindex/add_index.sim b/tests/script/tsim/tagindex/add_index.sim index cfbec90542..a6e9cae670 100644 --- a/tests/script/tsim/tagindex/add_index.sim +++ b/tests/script/tsim/tagindex/add_index.sim @@ -7,7 +7,7 @@ print ======== step0 $dbPrefix = ta_3_db $tbPrefix = ta_3_tb $mtPrefix = ta_3_mt -$tbNum = 500 +$tbNum = 50 $rowNum = 20 $totalNum = 200 @@ -48,12 +48,16 @@ while $i < $tbNum $i = $i + 1 endw - +sql_error create index ti1 on $mtPrefix (t1) sql create index ti2 on $mtPrefix (t2) sql create index ti5 on $mtPrefix (t5) print ==== test name conflict # + +sql_error create index ti1 on $mtPrefix(t1) +sql_error create index ti11 on $mtPrefix(t1) + sql_error create index ti3 on $mtPrefix(t2) sql_error create index ti2 on $mtPrefix(t2) @@ -73,6 +77,15 @@ while $i < $tbNum $i = $i + 1 endw +$i = 0 +while $i < $tbNum + sql select * from $mtPrefix where t1= $i ; + if $rows != 1 then + return -1 + endi + $i = $i + 1 +endw + print ===== test operator great equal @@ -250,7 +263,7 @@ endw print === show index sql select * from information_schema.ins_indexes -if $rows != 1 then +if $rows != 2 then return -1 endi @@ -259,12 +272,41 @@ print === drop index ti2 sql drop index ti2 print === drop not exist index + +sql select * from information_schema.ins_indexes +if $rows != 1 then + return -1 +endi + +sql drop index $data[0][0] + +if $rows != 0 then + return -1 +endi + + sql_error drop index t2 sql_error drop index t3 +sql create index ti0 on $mtPrefix (t1) + +$i = $interval +while $i < $limit + sql select * from $mtPrefix where t1 <= $i ; + + $tmp = $i - $interval + $tmp = $tmp + 1 + if $rows != $tmp then + return -1 + endi + $i = $i + 1 +endw sql_error create index ti0 on $mtPrefix (t1) +sql_error create index ti2 on $mtPrefix (t1) + + sql_error create index t2i on ta_3_tb17 (t2) diff --git a/tests/script/tsim/tagindex/sma_and_tag_index.sim b/tests/script/tsim/tagindex/sma_and_tag_index.sim index b15d22d439..e7e4682810 100644 --- 
a/tests/script/tsim/tagindex/sma_and_tag_index.sim +++ b/tests/script/tsim/tagindex/sma_and_tag_index.sim @@ -69,7 +69,7 @@ sql create sma index smat2i on $mtPrefix function(max(c1)) interval(6m,10s) slid sql select * from information_schema.ins_indexes -if $rows != 2 then +if $rows != 3 then return -1 endi @@ -84,7 +84,7 @@ while $i < 5 endw sql select * from information_schema.ins_indexes -if $rows != 6 then +if $rows != 7 then return -1 endi @@ -114,13 +114,13 @@ sql use $dbPrefix sql create table if not exists $mtPrefix (ts timestamp, c1 int) tags (t1 int, t2 int, t3 int, t4 int, t5 int) sql create index tagt2i on $mtPrefix (t2) sql select * from information_schema.ins_indexes -if $rows != 1 then +if $rows != 2 then return -1 endi sql alter table $mtPrefix drop tag t2 sql select * from information_schema.ins_indexes -if $rows != 0 then +if $rows != 1 then return -1 endi @@ -128,18 +128,22 @@ endi print ==== rename tag name, and update index colName sql create index tagt3i on $mtPrefix (t3) sql select * from information_schema.ins_indexes -if $rows != 1 then +if $rows != 2 then return -1 endi sql alter table $mtPrefix rename tag t3 txxx sql select * from information_schema.ins_indexes -if $rows != 1 then +if $rows != 2 then return -1 endi -if $data05 != txxx then - return -1 +if $data05 == txxx then + print "manual created index" +elif $data15 == txxx then + print "auto created index at tag0" +else + return -1; endi @@ -153,7 +157,7 @@ sql create table if not exists $mtPrefix (ts timestamp, c1 int) tags (t1 int, t2 sql create index tagt3i on $mtPrefix (t3) sql select * from information_schema.ins_indexes -if $rows != 2 then +if $rows != 4 then return -1 endi From beb84b2355199ea9fdedb5305273440f3c4331f0 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 15 Aug 2023 15:37:39 +0800 Subject: [PATCH 128/147] fix:tmq interface & remove snapshot code --- docs/en/07-develop/07-tmq.mdx | 91 +++++++++++----- docs/en/14-reference/03-connector/03-cpp.mdx | 93 
+++++++++++++++- docs/en/14-reference/03-connector/04-java.mdx | 2 - docs/examples/c/tmq_example.c | 6 -- docs/examples/go/sub/main.go | 1 - docs/zh/07-develop/07-tmq.mdx | 91 +++++++++++----- docs/zh/08-connector/10-cpp.mdx | 102 ++++++++++++++++-- examples/c/tmq.c | 5 - include/client/taos.h | 62 +++++------ 9 files changed, 341 insertions(+), 112 deletions(-) diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx index ccf39ef581..e78855dad3 100644 --- a/docs/en/07-develop/07-tmq.mdx +++ b/docs/en/07-develop/07-tmq.mdx @@ -23,7 +23,25 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers. -Tips: Data subscription is to consume data from the wal. If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products. 
+Tips:(c interface for example) +- A consumption group consumes all data under the same topic, and different consumption groups are independent of each other; +- A consumption group consumes all vgroups of the same topic, which can be composed of multiple consumers, but a vgroup is only consumed by one consumer. If the number of consumers exceeds the number of vgroups, the excess consumers do not consume data; +- On the server side, only one offset is saved for each vgroup, and the offsets for each vgroup are monotonically increasing, but not necessarily continuous. There is no correlation between the offsets of various vgroups; +- Each poll server will return a result block, which belongs to a vgroup and may contain data from multiple versions of wal. This block can be accessed through tmq_get_vgroup_offset. The offset interface obtains the offset of the first record in the block; +- If a consumer group has never committed an offset, when its member consumers restart and pull data again, they start consuming from the set value of the parameter auto.offset.reset; In a consumer lifecycle, the client locally records the offset of the most recent pull data and will not pull duplicate data; +- If a consumer terminates abnormally (without calling tmq_close), they need to wait for about 12 seconds to trigger their consumer group rebalance. 
The consumer's status on the server will change to LOST, and after about 1 day, the consumer will be automatically deleted; Exit normally, and after exiting, the consumer will be deleted; Add a new consumer, wait for about 2 seconds to trigger Rebalance, and the consumer's status on the server will change to ready; +- The consumer group Rebalance will reassign Vgroups to all consumer members in the ready state of the group, and consumers can only assign/see/commit/poll operations to the Vgroups they are responsible for; +- Consumers can tmq_position to obtain the offset of the current consumption, seek to the specified offset, and consume again; +- Seek points the position to the specified offset without executing the commit operation. Once the seek is successful, it can poll the specified offset and subsequent data; +- Position is to obtain the current consumption position, which is the position to be taken next time, not the current consumption position +- Commit is the submission of the consumption location. Without parameters, it is the submission of the current consumption location (the location to be taken next time, not the current consumption location). With parameters, it is the location in the submission parameters (i.e. the location to be taken after the next exit and restart) +- Seek is to set the consumer's consumption position. Wherever the seek goes, the position will be returned, all of which are the positions to be taken next time +- Seek does not affect commit, commit does not affect seek, independent of each other, the two are different concepts +- The begin interface is the offset of the first data in wal, and the end interface is the offset+1 of the last data in wal10. +- Before the seek operation, tmq must be call tmq_get_topic_assignment, The assignment interface obtains the vgroup ID and offset range of the consumer. 
The seek operation will detect whether the vgroup ID and offset are legal, and if they are illegal, an error will be reported; +- Due to the existence of a WAL expiration deletion mechanism, even if the seek operation is successful, it is possible that the offset has expired when polling data. If the offset of poll is less than the WAL minimum version number, it will be consumed from the WAL minimum version number; +- The tmq_get_vgroup_offset interface obtains the offset of the first data in the result block where the record is located. When seeking to this offset, it will consume all the data in this block. Refer to point four; +- Data subscription is to consume data from the wal. If some wal files are deleted according to WAL retention policy, the deleted data can't be consumed any more. So you need to set a reasonable value for parameter `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database and make sure your application consume the data in a timely way to make sure there is no data loss. This behavior is similar to Kafka and other widely used message queue products. 
## Data Schema and API @@ -33,36 +51,57 @@ The related schemas and APIs in various languages are described as follows: ```c -typedef struct tmq_t tmq_t; -typedef struct tmq_conf_t tmq_conf_t; -typedef struct tmq_list_t tmq_list_t; + typedef struct tmq_t tmq_t; + typedef struct tmq_conf_t tmq_conf_t; + typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); + typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); -DLL_EXPORT tmq_list_t *tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); -DLL_EXPORT const char *tmq_err2str(int32_t code); + typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, + } tmq_conf_res_t; -DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); -DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); -DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); -DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); -DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); -DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + typedef struct tmq_topic_assignment { + int32_t vgId; + int64_t currentOffset; + int64_t begin; + int64_t end; // The last version of wal + 1 + } tmq_topic_assignment; -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; -typedef enum tmq_conf_res_t tmq_conf_res_t; + DLL_EXPORT tmq_conf_t *tmq_conf_new(); + DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); + DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); + DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t 
*conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + DLL_EXPORT tmq_list_t *tmq_list_new(); + DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); + DLL_EXPORT void tmq_list_destroy(tmq_list_t *); + DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); + DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); + + DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); + DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); + DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); + DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); + DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); + DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); + DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); //Commit the msg’s offset + 1 + DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment); + DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment); + DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1 + DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); + + DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); + DLL_EXPORT const char 
*tmq_get_db_name(TAOS_RES *res); + DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); + DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); // Get current offset of the result + DLL_EXPORT const char *tmq_err2str(int32_t code); ``` For more information, see [C/C++ Connector](/reference/connector/cpp). diff --git a/docs/en/14-reference/03-connector/03-cpp.mdx b/docs/en/14-reference/03-connector/03-cpp.mdx index 13029dbe91..0009902425 100644 --- a/docs/en/14-reference/03-connector/03-cpp.mdx +++ b/docs/en/14-reference/03-connector/03-cpp.mdx @@ -378,7 +378,7 @@ In addition to writing data using the SQL method or the parameter binding API, w - `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)` **Function description** - This interface writes the text data of the line protocol to TDengine. + - This interface writes the text data of the line protocol to TDengine. **Parameter description** - taos: database connection, established by the `taos_connect()` function. @@ -387,12 +387,13 @@ In addition to writing data using the SQL method or the parameter binding API, w - protocol: the protocol type of the lines, used to identify the text data format. - precision: precision string for the timestamp in the text data. - **return value** - TAOS_RES structure, application can get error message by using `taos_errstr()` and also error code by using `taos_errno()`. + **Return value** + - TAOS_RES structure, application can get error message by using `taos_errstr()` and also error code by using `taos_errno()`. In some cases, the returned TAOS_RES is `NULL`, and it is still possible to call `taos_errno()` to safely get the error code information. The returned TAOS_RES needs to be freed by the caller in order to avoid memory leaks. **Description** + The protocol type is enumerated and contains the following three formats. 
- TSDB_SML_LINE_PROTOCOL: InfluxDB line protocol (Line Protocol) @@ -427,3 +428,89 @@ In addition to writing data using the SQL method or the parameter binding API, w - Within _raw interfaces represent data through the passed parameters lines and len. In order to solve the problem that the original interface data contains '\0' and is truncated. The totalRows pointer returns the number of parsed data rows. - Within _ttl interfaces can pass the ttl parameter to control the ttl expiration time of the table. - Within _reqid interfaces can track the entire call chain by passing the reqid parameter. + +### Subscription API + +- `int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment)` +- `void tmq_free_assignment(tmq_topic_assignment* pAssignment)` + + tmq_topic_assignment defined as follows: + ```c + typedef struct tmq_topic_assignment { + int32_t vgId; + int64_t currentOffset; + int64_t begin; + int64_t end; + } tmq_topic_assignment; + ``` + **Function description** + - tmq_get_topic_assignment get the current vgroup information of this consumer + + **Parameter description** + - numOfAssignment:the num of vgroups assigned to this consumer + - assignment:the information of vgroups, needed to be freed by tmq_free_assignment + + **Return value** + - zero success,none zero failed, wrong message can be obtained through `char *tmq_err2str(int32_t code)` + +- `int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)` + **Function description** + - get the committed offset + + **Return value** + - the value of committed offset, -2147467247 means no committed value, Other values less than 0 indicate failure + +- `int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg)` +- `void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param)` +- `int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)` +- `void 
tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param)` + + **Function description** + + The commit interface is divided into two types, each with synchronous and asynchronous interfaces: + - The first type: based on message submission, submit the progress in the message. If the message passes NULL, submit the current progress of all vgroups consumed by the current consumer: tmq_commit_sync/tmq_commit_async + - The second type: submit based on the offset of a Vgroup in a topic: tmq_commit_offset_sync/tmq_commit_offset_async + + **Parameter description** + - msg:Message consumed, If the message passes NULL, submit the current progress of all vgroups consumed by the current consumer + + **Return value** + - zero success, non-zero failed, wrong message can be obtained through `char *tmq_err2str(int32_t code)` + +- `int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId)` + + **Function description** + - Obtain the current consumption location, which is the next location of the data consumed + + **Return value** + - the current consumption location, non-zero failed, wrong message can be obtained through `char *tmq_err2str(int32_t code)` + +- `int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)` + + **Function description** + - Set the offset position of the consumer in a Vgroup of a certain topic to start consumption + + **Return value** + - zero success, non-zero failed, wrong message can be obtained through `char *tmq_err2str(int32_t code)` + +- `int64_t tmq_get_vgroup_offset(TAOS_RES* res)` + + **Function description** + - Obtain the starting offset of the consumed data + + **Parameter description** + - msg:Message consumed + + **Return value** + - the starting offset of the consumed data, non-zero failed, wrong message can be obtained through `char *tmq_err2str(int32_t code)` + +- `int32_t tmq_subscription(tmq_t *tmq, 
tmq_list_t **topics)` + + **Function description** + - Obtain a list of topics subscribed by consumers + + **Parameter description** + - topics: a list of topics subscribed by consumers,need to be freed by tmq_list_destroy + + **Return value** + - zero success,none zero failed, wrong message can be obtained through `char *tmq_err2str(int32_t code)` \ No newline at end of file diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx index 69bbd287ed..ff1f209788 100644 --- a/docs/en/14-reference/03-connector/04-java.mdx +++ b/docs/en/14-reference/03-connector/04-java.mdx @@ -1091,8 +1091,6 @@ public abstract class ConsumerLoop { config.setProperty("client.id", "1"); config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer"); config.setProperty("value.deserializer.encoding", "UTF-8"); - config.setProperty("experimental.snapshot.enable", "true"); - this.consumer = new TaosConsumer<>(config); this.topics = Collections.singletonList("topic_speed"); diff --git a/docs/examples/c/tmq_example.c b/docs/examples/c/tmq_example.c index d958428b8f..ca7c23e93b 100644 --- a/docs/examples/c/tmq_example.c +++ b/docs/examples/c/tmq_example.c @@ -227,12 +227,6 @@ tmq_t* build_consumer() { return NULL; } - code = tmq_conf_set(conf, "experimental.snapshot.enable", "false"); - if (TMQ_CONF_OK != code) { - tmq_conf_destroy(conf); - return NULL; - } - tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); tmq_t* tmq = tmq_consumer_new(conf, NULL, 0); diff --git a/docs/examples/go/sub/main.go b/docs/examples/go/sub/main.go index cb24e351ab..ed335cfdea 100644 --- a/docs/examples/go/sub/main.go +++ b/docs/examples/go/sub/main.go @@ -35,7 +35,6 @@ func main() { "td.connect.port": "6030", "client.id": "test_tmq_client", "enable.auto.commit": "false", - "experimental.snapshot.enable": "true", "msg.with.table.name": "true", }) if err != nil { diff --git a/docs/zh/07-develop/07-tmq.mdx 
b/docs/zh/07-develop/07-tmq.mdx index 38b91d7cea..04c978679e 100644 --- a/docs/zh/07-develop/07-tmq.mdx +++ b/docs/zh/07-develop/07-tmq.mdx @@ -25,7 +25,25 @@ import CDemo from "./_sub_c.mdx"; 本文档不对消息队列本身的基础知识做介绍,如果需要了解,请自行搜索。 -注意:数据订阅是从 WAL 消费数据,如果一些 WAL 文件被基于 WAL 保留策略删除,则已经删除的 WAL 文件中的数据就无法再消费到。需要根据业务需要在创建数据库时合理设置 `WAL_RETENTION_PERIOD` 或 `WAL_RETENTION_SIZE` ,并确保应用及时消费数据,这样才不会产生数据丢失的现象。数据订阅的行为与 Kafka 等广泛使用的消息队列类产品的行为相似。 +说明(以c接口为例): +- 一个消费组消费同一个topic下的所有数据,不同消费组之间相互独立; +- 一个消费组消费同一个topic所有的vgroup,消费组可由多个消费者组成,但一个vgroup仅被一个消费者消费,如果消费者数量超过了vgroup数量,多余的消费者不消费数据; +- 在服务端每个vgroup仅保存一个offset,每个vgroup的offset是单调递增的,但不一定连续。各个vgroup的offset之间没有关联; +- 每次poll服务端会返回一个结果block,该block属于一个vgroup,可能包含多个wal版本的数据,可以通过 tmq_get_vgroup_offset 接口获得是该block第一条记录的offset; +- 一个消费组如果从未commit过offset,当其成员消费者重启重新拉取数据时,均从参数auto.offset.reset设定值开始消费;在一个消费者生命周期中,客户端本地记录了最近一次拉取数据的offset,不会拉取重复数据; +- 消费者如果异常终止(没有调用tmq_close),需等约12秒后触发其所属消费组rebalance,该消费者在服务端状态变为LOST,约1天后该消费者自动被删除;正常退出,退出后就会删除消费者;新增消费者,需等约2秒触发rebalance,该消费者在服务端状态变为ready; +- 消费组rebalance会对该组所有ready状态的消费者成员重新进行vgroup分配,消费者仅能对自己负责的vgroup进行assignment/seek/commit/poll操作; +- 消费者可利用 tmq_position 获得当前消费的offset,并seek到指定offset,重新消费; +- seek将position指向指定offset,不执行commit操作,一旦seek成功,可poll拉取指定offset及以后的数据; +- seek 操作之前须调用 tmq_get_topic_assignment 接口获取该consumer的vgroup ID和offset范围。seek 操作会检测vgroup ID 和 offset是否合法,如非法将报错; +- position是获取当前的消费位置,是下次要取的位置,不是当前消费到的位置 +- commit是提交消费位置,不带参数的话,是提交当前消费位置(下次要取的位置,不是当前消费到的位置),带参数的话,是提交参数里的位置(也即下次退出重启后要取的位置) +- seek是设置consumer消费位置,seek到哪,position就返回哪,都是下次要取的位置 +- seek不会影响commit,commit不影响seek,相互独立,两个是不同的概念 +- begin接口为wal 第一条数据的offset,end 接口为wal 最后一条数据的offset + 1 +- tmq_get_vgroup_offset接口获取的是记录所在结果block块里的第一条数据的offset,当seek至该offset时,将消费到这个block里的全部数据。参见第四点; +- 由于存在 WAL 过期删除机制,即使seek 操作成功,poll数据时有可能offset已失效。如果poll 的offset 小于 WAL 最小版本号,将会从WAL最小版本号消费; +- 数据订阅是从 WAL 消费数据,如果一些 WAL 文件被基于 WAL 保留策略删除,则已经删除的 WAL 文件中的数据就无法再消费到。需要根据业务需要在创建数据库时合理设置 `WAL_RETENTION_PERIOD` 或 `WAL_RETENTION_SIZE` 
,并确保应用及时消费数据,这样才不会产生数据丢失的现象。数据订阅的行为与 Kafka 等广泛使用的消息队列类产品的行为相似; ## 主要数据结构和 API @@ -35,36 +53,57 @@ import CDemo from "./_sub_c.mdx"; ```c -typedef struct tmq_t tmq_t; -typedef struct tmq_conf_t tmq_conf_t; -typedef struct tmq_list_t tmq_list_t; + typedef struct tmq_t tmq_t; + typedef struct tmq_conf_t tmq_conf_t; + typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); + typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); -DLL_EXPORT tmq_list_t *tmq_list_new(); -DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); -DLL_EXPORT void tmq_list_destroy(tmq_list_t *); -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); -DLL_EXPORT const char *tmq_err2str(int32_t code); + typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, + } tmq_conf_res_t; -DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); -DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); -DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); -DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); -DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); -DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + typedef struct tmq_topic_assignment { + int32_t vgId; + int64_t currentOffset; + int64_t begin; + int64_t end; // The last version of wal + 1 + } tmq_topic_assignment; -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; -typedef enum tmq_conf_res_t tmq_conf_res_t; + DLL_EXPORT tmq_conf_t *tmq_conf_new(); + DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); + DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); + DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t 
tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); + DLL_EXPORT tmq_list_t *tmq_list_new(); + DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); + DLL_EXPORT void tmq_list_destroy(tmq_list_t *); + DLL_EXPORT int32_t tmq_list_get_size(const tmq_list_t *); + DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); + + DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); + DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); + DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); + DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); + DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); + DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); + DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); //Commit the msg’s offset + 1 + DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); + DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment); + DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment); + DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); + DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1 + DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); + + DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); + DLL_EXPORT 
const char *tmq_get_db_name(TAOS_RES *res); + DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); + DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); // Get current offset of the result + DLL_EXPORT const char *tmq_err2str(int32_t code); ``` 这些 API 的文档请见 [C/C++ Connector](../../connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。 diff --git a/docs/zh/08-connector/10-cpp.mdx b/docs/zh/08-connector/10-cpp.mdx index 9c5095f09c..c0723cd85c 100644 --- a/docs/zh/08-connector/10-cpp.mdx +++ b/docs/zh/08-connector/10-cpp.mdx @@ -467,21 +467,22 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多 - `TAOS_RES* taos_schemaless_insert(TAOS* taos, const char* lines[], int numLines, int protocol, int precision)` **功能说明** - 该接口将行协议的文本数据写入到 TDengine 中。 + - 该接口将行协议的文本数据写入到 TDengine 中。 **参数说明** - taos: 数据库连接,通过 `taos_connect()` 函数建立的数据库连接。 - lines:文本数据。满足解析格式要求的无模式文本字符串。 - numLines:文本数据的行数,不能为 0 。 - protocol: 行协议类型,用于标识文本数据格式。 - precision:文本数据中的时间戳精度字符串。 + - taos: 数据库连接,通过 `taos_connect()` 函数建立的数据库连接。 + - lines:文本数据。满足解析格式要求的无模式文本字符串。 + - numLines:文本数据的行数,不能为 0 。 + - protocol: 行协议类型,用于标识文本数据格式。 + - precision:文本数据中的时间戳精度字符串。 **返回值** - TAOS_RES 结构体,应用可以通过使用 `taos_errstr()` 获得错误信息,也可以使用 `taos_errno()` 获得错误码。 + - TAOS_RES 结构体,应用可以通过使用 `taos_errstr()` 获得错误信息,也可以使用 `taos_errno()` 获得错误码。 在某些情况下,返回的 TAOS_RES 为 `NULL`,此时仍然可以调用 `taos_errno()` 来安全地获得错误码信息。 返回的 TAOS_RES 需要调用方来负责释放,否则会出现内存泄漏。 **说明** + 协议类型是枚举类型,包含以下三种格式: - TSDB_SML_LINE_PROTOCOL:InfluxDB 行协议(Line Protocol) @@ -515,3 +516,90 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多 - 带_raw的接口通过传递的参数lines指针和长度len来表示数据,为了解决原始接口数据包含'\0'而被截断的问题。totalRows指针返回解析出来的数据行数。 - 带_ttl的接口可以传递ttl参数来控制建表的ttl到期时间。 - 带_reqid的接口可以通过传递reqid参数来追踪整个的调用链。 + +### 数据订阅 API + +- `int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, int32_t *numOfAssignment)` +- `void tmq_free_assignment(tmq_topic_assignment* pAssignment)` + + tmq_topic_assignment结构体定义如下: + ```c + typedef struct tmq_topic_assignment { + 
int32_t vgId; + int64_t currentOffset; + int64_t begin; + int64_t end; + } tmq_topic_assignment; + ``` + **功能说明** + - tmq_get_topic_assignment 接口返回当前consumer分配的vgroup的信息,每个vgroup的信息包括vgId,wal的最大最小offset,以及当前消费到的offset。 + + **参数说明** + - numOfAssignment :分配给该consumer有效的vgroup个数。 + - assignment :分配的信息,数据大小为numOfAssignment,需要通过 tmq_free_assignment 接口释放。 + + **返回值** + - 错误码,0成功,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息。 + +- `int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId)` + **功能说明** + - 获取当前 consumer 在某个 topic 和 vgroup上的 commit 位置。 + + **返回值** + - 当前commit的位置,-2147467247表示没有消费进度,其他小于0的值表示失败,错误码就是返回值 + +- `int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg)` +- `void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param)` +- `int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)` +- `void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param)` + + **功能说明** + + commit接口分为两种类型,每种类型有同步和异步接口: + - 第一种类型:根据消息提交,提交消息里的进度,如果消息传NULL,提交当前consumer所有消费的vgroup的当前进度 : tmq_commit_sync/tmq_commit_async + - 第二种类型:根据某个topic的某个vgroup的offset提交 : tmq_commit_offset_sync/tmq_commit_offset_async + + **参数说明** + - msg:消费到的消息结构,如果msg传NULL,提交当前consumer所有消费的vgroup的当前进度 + + **返回值** + - 错误码,0成功,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息 + +- `int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId)` + + **功能说明** + - 获取当前消费位置,为消费到的数据位置的下一个位置 + + **返回值** + - 消费位置,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息 + +- `int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset)` + + **功能说明** + - 设置 consumer 在某个topic的某个vgroup的 offset位置,开始消费 + + **返回值** + - 错误码,0成功,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息 + +- `int32_t int64_t tmq_get_vgroup_offset(TAOS_RES* res)` + + **功能说明** + + 获取 poll 消费到的数据的起始offset + + **参数说明** + - msg:消费到的消息结构 + + **返回值** + - 
消费到的offset,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息 + +- `int32_t int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics)` + + **功能说明** + + 获取消费者订阅的 topic 列表 + **参数说明** + - topics: 获取的 topic 列表存储在这个结构中,接口内分配内存,需调用tmq_list_destroy释放 + + **返回值** + - 错误码,0成功,非0失败,可通过 `char *tmq_err2str(int32_t code)` 函数获取错误信息 \ No newline at end of file diff --git a/examples/c/tmq.c b/examples/c/tmq.c index e1133c109e..136c54b874 100644 --- a/examples/c/tmq.c +++ b/examples/c/tmq.c @@ -228,11 +228,6 @@ tmq_t* build_consumer() { tmq_conf_destroy(conf); return NULL; } - code = tmq_conf_set(conf, "experimental.snapshot.enable", "false"); - if (TMQ_CONF_OK != code) { - tmq_conf_destroy(conf); - return NULL; - } tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL); tmq = tmq_consumer_new(conf, NULL, 0); diff --git a/include/client/taos.h b/include/client/taos.h index 3cc2d907ab..3ac25cf580 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -260,7 +260,25 @@ typedef struct tmq_t tmq_t; typedef struct tmq_conf_t tmq_conf_t; typedef struct tmq_list_t tmq_list_t; -typedef void(tmq_commit_cb(tmq_t *, int32_t code, void *param)); +typedef void(tmq_commit_cb(tmq_t *tmq, int32_t code, void *param)); + +typedef enum tmq_conf_res_t { + TMQ_CONF_UNKNOWN = -2, + TMQ_CONF_INVALID = -1, + TMQ_CONF_OK = 0, +} tmq_conf_res_t; + +typedef struct tmq_topic_assignment { + int32_t vgId; + int64_t currentOffset; + int64_t begin; + int64_t end; // The last invalidate version of wal + 1 +} tmq_topic_assignment; + +DLL_EXPORT tmq_conf_t *tmq_conf_new(); +DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); +DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); +DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); DLL_EXPORT tmq_list_t *tmq_list_new(); DLL_EXPORT int32_t tmq_list_append(tmq_list_t *, const char *); @@ -268,55 +286,27 @@ DLL_EXPORT void tmq_list_destroy(tmq_list_t *); DLL_EXPORT 
int32_t tmq_list_get_size(const tmq_list_t *); DLL_EXPORT char **tmq_list_to_c_array(const tmq_list_t *); -DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); - -DLL_EXPORT const char *tmq_err2str(int32_t code); - -/* ------------------------TMQ CONSUMER INTERFACE------------------------ */ -typedef struct tmq_topic_assignment { - int32_t vgId; - int64_t currentOffset; - int64_t begin; - int64_t end; -} tmq_topic_assignment; - +DLL_EXPORT tmq_t *tmq_consumer_new(tmq_conf_t *conf, char *errstr, int32_t errstrLen); DLL_EXPORT int32_t tmq_subscribe(tmq_t *tmq, const tmq_list_t *topic_list); DLL_EXPORT int32_t tmq_unsubscribe(tmq_t *tmq); DLL_EXPORT int32_t tmq_subscription(tmq_t *tmq, tmq_list_t **topics); DLL_EXPORT TAOS_RES *tmq_consumer_poll(tmq_t *tmq, int64_t timeout); DLL_EXPORT int32_t tmq_consumer_close(tmq_t *tmq); -DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); +DLL_EXPORT int32_t tmq_commit_sync(tmq_t *tmq, const TAOS_RES *msg); //Commit the msg’s offset + 1 DLL_EXPORT void tmq_commit_async(tmq_t *tmq, const TAOS_RES *msg, tmq_commit_cb *cb, void *param); DLL_EXPORT int32_t tmq_commit_offset_sync(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); DLL_EXPORT void tmq_commit_offset_async(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset, tmq_commit_cb *cb, void *param); -DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment, - int32_t *numOfAssignment); +DLL_EXPORT int32_t tmq_get_topic_assignment(tmq_t *tmq, const char *pTopicName, tmq_topic_assignment **assignment,int32_t *numOfAssignment); DLL_EXPORT void tmq_free_assignment(tmq_topic_assignment* pAssignment); DLL_EXPORT int32_t tmq_offset_seek(tmq_t *tmq, const char *pTopicName, int32_t vgId, int64_t offset); +DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); // The current offset is the offset of the last consumed message + 1 
+DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); DLL_EXPORT const char *tmq_get_topic_name(TAOS_RES *res); DLL_EXPORT const char *tmq_get_db_name(TAOS_RES *res); DLL_EXPORT int32_t tmq_get_vgroup_id(TAOS_RES *res); -DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); -DLL_EXPORT int64_t tmq_position(tmq_t *tmq, const char *pTopicName, int32_t vgId); -DLL_EXPORT int64_t tmq_committed(tmq_t *tmq, const char *pTopicName, int32_t vgId); - -/* ----------------------TMQ CONFIGURATION INTERFACE---------------------- */ - -enum tmq_conf_res_t { - TMQ_CONF_UNKNOWN = -2, - TMQ_CONF_INVALID = -1, - TMQ_CONF_OK = 0, -}; - -typedef enum tmq_conf_res_t tmq_conf_res_t; - -DLL_EXPORT tmq_conf_t *tmq_conf_new(); -DLL_EXPORT tmq_conf_res_t tmq_conf_set(tmq_conf_t *conf, const char *key, const char *value); -DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf); -DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param); - -/* -------------------------TMQ MSG HANDLE INTERFACE---------------------- */ +DLL_EXPORT int64_t tmq_get_vgroup_offset(TAOS_RES* res); // Get current offset of the result +DLL_EXPORT const char *tmq_err2str(int32_t code); /* ------------------------------ TAOSX -----------------------------------*/ // note: following apis are unstable From c4e5cfd2fbb0d2a85d141ee57d6e03b095c31061 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Aug 2023 15:40:40 +0800 Subject: [PATCH 129/147] retention: return code from copy --- source/dnode/vnode/src/tsdb/tsdbRetention.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c index 46a5d19a1a..267e8b4117 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRetention.c +++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c @@ -179,6 +179,16 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const int32_t lino = 0; 
STFileOp op = {0}; + // remove old + op = (STFileOp){ + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(rtner->fopArr, op); + TSDB_CHECK_CODE(code, lino, _exit); + // create new op = (STFileOp){ .optype = TSDB_FOP_CREATE, @@ -204,16 +214,6 @@ static int32_t tsdbMigrateDataFileS3(SRTNer *rtner, const STFileObj *fobj, const code = tsdbCopyFileS3(rtner, fobj, &op.nf); TSDB_CHECK_CODE(code, lino, _exit); - // remove old - op = (STFileOp){ - .optype = TSDB_FOP_REMOVE, - .fid = fobj->f->fid, - .of = fobj->f[0], - }; - - code = TARRAY2_APPEND(rtner->fopArr, op); - TSDB_CHECK_CODE(code, lino, _exit); - _exit: if (code) { TSDB_ERROR_LOG(TD_VID(rtner->tsdb->pVnode), lino, code); From 8a84dce6417618332e7c22a01a7df5c0421367ad Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 15 Aug 2023 16:04:30 +0800 Subject: [PATCH 130/147] enh: disable udf/stream/rsma/tsma for windows --- source/dnode/mnode/impl/src/mndDb.c | 3 +-- source/dnode/mnode/impl/src/mndFunc.c | 5 ++--- source/dnode/mnode/impl/src/mndSma.c | 9 ++++----- source/dnode/mnode/impl/src/mndStream.c | 9 ++++----- source/libs/parser/src/parTranslater.c | 13 ------------- 5 files changed, 11 insertions(+), 28 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index fdefe9e5b1..ada80b8370 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -666,14 +666,13 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { terrno = TSDB_CODE_INVALID_MSG; goto _OVER; } - - mInfo("db:%s, start to create, vgroups:%d", createReq.db, createReq.numOfVgroups); #ifdef WINDOWS if (taosArrayGetSize(createReq.pRetensions) > 0) { code = TSDB_CODE_PAR_INVALID_PLATFORM; goto _OVER; } #endif + mInfo("db:%s, start to create, vgroups:%d", createReq.db, createReq.numOfVgroups); if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_DB, NULL) != 0) { goto _OVER; } diff --git 
a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c index 5f4ac830cd..dc75a311e7 100644 --- a/source/dnode/mnode/impl/src/mndFunc.c +++ b/source/dnode/mnode/impl/src/mndFunc.c @@ -359,12 +359,11 @@ static int32_t mndProcessCreateFuncReq(SRpcMsg *pReq) { terrno = TSDB_CODE_INVALID_MSG; goto _OVER; } - - mInfo("func:%s, start to create, size:%d", createReq.name, createReq.codeLen); #ifdef WINDOWS - code = TSDB_CODE_PAR_INVALID_PLATFORM; + terrno = TSDB_CODE_PAR_INVALID_PLATFORM; goto _OVER; #endif + mInfo("func:%s, start to create, size:%d", createReq.name, createReq.codeLen); if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_FUNC) != 0) { goto _OVER; } diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index ff3f66efaf..55169a5d56 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -655,10 +655,6 @@ _OVER: } static int32_t mndCheckCreateSmaReq(SMCreateSmaReq *pCreate) { -#ifdef WINDOWS - terrno = TSDB_CODE_PAR_INVALID_PLATFORM; - return -1; -#endif terrno = TSDB_CODE_MND_INVALID_SMA_OPTION; if (pCreate->name[0] == 0) return -1; if (pCreate->stb[0] == 0) return -1; @@ -709,7 +705,10 @@ static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq) { terrno = TSDB_CODE_INVALID_MSG; goto _OVER; } - +#ifdef WINDOWS + terrno = TSDB_CODE_PAR_INVALID_PLATFORM; + goto _OVER; +#endif mInfo("sma:%s, start to create", createReq.name); if (mndCheckCreateSmaReq(&createReq) != 0) { goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index d6bb8c167f..7fd2444ab2 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -253,10 +253,6 @@ static void mndShowStreamTrigger(char *dst, SStreamObj *pStream) { } static int32_t mndCheckCreateStreamReq(SCMCreateStreamReq *pCreate) { -#ifdef WINDOWS - terrno = TSDB_CODE_PAR_INVALID_PLATFORM; - return -1; -#endif if 
(pCreate->name[0] == 0 || pCreate->sql == NULL || pCreate->sql[0] == 0 || pCreate->sourceDB[0] == 0 || pCreate->targetStbFullName[0] == 0) { terrno = TSDB_CODE_MND_INVALID_STREAM_OPTION; @@ -696,7 +692,10 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { terrno = TSDB_CODE_INVALID_MSG; goto _OVER; } - +#ifdef WINDOWS + terrno = TSDB_CODE_PAR_INVALID_PLATFORM; + goto _OVER; +#endif mInfo("stream:%s, start to create, sql:%s", createStreamReq.name, createStreamReq.sql); if (mndCheckCreateStreamReq(&createStreamReq) != 0) { diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 9c3beea2d8..a41447edf3 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -4418,10 +4418,6 @@ static int32_t checkDbRetentionsOption(STranslateContext* pCxt, SNodeList* pRete return TSDB_CODE_SUCCESS; } -#ifdef WINDOWS - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); -#endif - if (LIST_LENGTH(pRetentions) > 3) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DB_OPTION, "Invalid option retentions"); } @@ -5871,9 +5867,6 @@ static int32_t checkCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pS } static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) { -#ifdef WINDOWS - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); -#endif int32_t code = checkCreateSmaIndex(pCxt, pStmt); pStmt->pReq = taosMemoryCalloc(1, sizeof(SMCreateSmaReq)); if (pStmt->pReq == NULL) code = TSDB_CODE_OUT_OF_MEMORY; @@ -7059,9 +7052,6 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* } static int32_t translateCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pStmt) { -#ifdef WINDOWS - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature 
on this platform"); -#endif SCMCreateStreamReq createReq = {0}; int32_t code = checkCreateStream(pCxt, pStmt); @@ -7211,9 +7201,6 @@ static int32_t readFromFile(char* pName, int32_t* len, char** buf) { } static int32_t translateCreateFunction(STranslateContext* pCxt, SCreateFunctionStmt* pStmt) { -#ifdef WINDOWS - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platform"); -#endif if (fmIsBuiltinFunc(pStmt->funcName)) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_FUNCTION_NAME); } From 1876a94e264917dd57490591cc86a6ea8c08da7f Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 15 Aug 2023 16:05:09 +0800 Subject: [PATCH 131/147] fix bugs --- source/libs/function/src/builtins.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index ef7c9d1442..9a301b7f1c 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1568,17 +1568,19 @@ static int32_t translateIrate(SFunctionNode* pFunc, char* pErrBuf, int32_t len) } static int32_t translateIrateImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isPartial) { - if (3 != LIST_LENGTH(pFunc->pParameterList)) { - return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); - } - uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type; if (isPartial) { + if (3 != LIST_LENGTH(pFunc->pParameterList)) { + return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); + } if (!IS_NUMERIC_TYPE(colType)) { return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } pFunc->node.resType = (SDataType){.bytes = getIrateInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}; } else { + if (1 != LIST_LENGTH(pFunc->pParameterList)) { + return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName); + } if (TSDB_DATA_TYPE_BINARY != colType) { 
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName); } @@ -2660,8 +2662,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_irate_merge", .type = FUNCTION_TYPE_IRATE_MERGE, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | - FUNC_MGT_FORBID_SYSTABLE_FUNC, + .classification = FUNC_MGT_AGG_FUNC, .translateFunc = translateIrateMerge, .getEnvFunc = getIrateFuncEnv, .initFunc = irateFuncSetup, From 84e472ad03b71aabd88670184d1de52c9b85dd3e Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Tue, 15 Aug 2023 16:10:54 +0800 Subject: [PATCH 132/147] enhance: tag cond col list only once and tag scan derive from scan --- include/libs/nodes/plannodes.h | 10 +---- source/libs/command/src/explain.c | 16 +++---- source/libs/executor/inc/executorInt.h | 7 +++ source/libs/executor/src/scanoperator.c | 36 +++++++-------- source/libs/nodes/src/nodesCloneFuncs.c | 9 +--- source/libs/nodes/src/nodesCodeFuncs.c | 48 ++------------------ source/libs/nodes/src/nodesMsgFuncs.c | 58 +++---------------------- 7 files changed, 41 insertions(+), 143 deletions(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 3e24e417fc..4529520ace 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -336,15 +336,7 @@ typedef struct SScanPhysiNode { } SScanPhysiNode; typedef struct STagScanPhysiNode { - // SScanPhysiNode scan; //TODO? - SPhysiNode node; - SNodeList* pScanCols; - SNodeList* pScanPseudoCols; - uint64_t uid; // unique id of the table - uint64_t suid; - int8_t tableType; - SName tableName; - bool groupOrderScan; + SScanPhysiNode scan; bool onlyMetaCtbIdx; //no tbname, tag index not used. 
} STagScanPhysiNode; diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index e917de33dd..e167b31ef8 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -291,17 +291,17 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i switch (pNode->type) { case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: { STagScanPhysiNode *pTagScanNode = (STagScanPhysiNode *)pNode; - EXPLAIN_ROW_NEW(level, EXPLAIN_TAG_SCAN_FORMAT, pTagScanNode->tableName.tname); + EXPLAIN_ROW_NEW(level, EXPLAIN_TAG_SCAN_FORMAT, pTagScanNode->scan.tableName.tname); EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); if (pResNode->pExecInfo) { QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); } - if (pTagScanNode->pScanPseudoCols) { - EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->pScanPseudoCols->length); + if (pTagScanNode->scan.pScanPseudoCols) { + EXPLAIN_ROW_APPEND(EXPLAIN_PSEUDO_COLUMNS_FORMAT, pTagScanNode->scan.pScanPseudoCols->length); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); } - EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->totalRowSize); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->scan.node.pOutputDataBlockDesc->totalRowSize); EXPLAIN_ROW_APPEND(EXPLAIN_RIGHT_PARENTHESIS_FORMAT); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level)); @@ -309,11 +309,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i if (verbose) { EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_COLUMNS_FORMAT, - nodesGetOutputNumFromSlotList(pTagScanNode->node.pOutputDataBlockDesc->pSlots)); + nodesGetOutputNumFromSlotList(pTagScanNode->scan.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); - EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->outputRowSize); - 
EXPLAIN_ROW_APPEND_LIMIT(pTagScanNode->node.pLimit); - EXPLAIN_ROW_APPEND_SLIMIT(pTagScanNode->node.pSlimit); + EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->scan.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pTagScanNode->scan.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pTagScanNode->scan.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index cb066d809c..dad15dc6bc 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -251,6 +251,12 @@ typedef struct STableMergeScanInfo { SSortExecInfo sortExecInfo; } STableMergeScanInfo; +typedef struct STagScanFilterContext { + SHashObj* colHash; + int32_t index; + SArray* cInfoList; +} STagScanFilterContext; + typedef struct STagScanInfo { SColumnInfo* pCols; SSDataBlock* pRes; @@ -263,6 +269,7 @@ typedef struct STagScanInfo { void* pCtbCursor; SNode* pTagCond; SNode* pTagIndexCond; + STagScanFilterContext filterCtx; SArray* aUidTags; // SArray SArray* aFilterIdxs; // SArray SStorageAPI* pStorageAPI; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 71352b1c6e..ef28875be4 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2719,12 +2719,6 @@ static int32_t tagScanCreateResultData(SDataType* pType, int32_t numOfRows, SSca return TSDB_CODE_SUCCESS; } -typedef struct STagScanFilterContext { - SHashObj* colHash; - int32_t index; - SArray* cInfoList; -} STagScanFilterContext; - static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) { SColumnNode* pSColumnNode = NULL; if (QUERY_NODE_COLUMN == nodeType((*pNode))) { @@ -2767,17 +2761,11 @@ static EDealRes tagScanRewriteTagColumn(SNode** pNode, void* pContext) { } -static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, 
void* pVnode, SStorageAPI* pAPI) { +static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aFilterIdxs, void* pVnode, SStorageAPI* pAPI, STagScanInfo* pInfo) { int32_t code = 0; int32_t numOfTables = taosArrayGetSize(aUidTags); - STagScanFilterContext ctx = {0}; - ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); - ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); - - nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&ctx); - - SSDataBlock* pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, aUidTags, pVnode, pAPI); + SSDataBlock* pResBlock = createTagValBlockForFilter(pInfo->filterCtx.cInfoList, numOfTables, aUidTags, pVnode, pAPI); SArray* pBlockList = taosArrayInit(1, POINTER_BYTES); taosArrayPush(pBlockList, &pResBlock); @@ -2801,8 +2789,7 @@ static void tagScanFilterByTagCond(SArray* aUidTags, SNode* pTagCond, SArray* aF blockDataDestroy(pResBlock); taosArrayDestroy(pBlockList); - taosHashCleanup(ctx.colHash); - taosArrayDestroy(ctx.cInfoList); + } static void tagScanFillOneCellWithTag(const STUidTagInfo* pUidTagInfo, SExprInfo* pExprInfo, SColumnInfoData* pColInfo, int rowIndex, const SStorageAPI* pAPI, void* pVnode) { @@ -2911,7 +2898,7 @@ static SSDataBlock* doTagScanFromCtbIdx(SOperatorInfo* pOperator) { bool ignoreFilterIdx = true; if (pInfo->pTagCond != NULL) { ignoreFilterIdx = false; - tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI); + tagScanFilterByTagCond(aUidTags, pInfo->pTagCond, aFilterIdxs, pInfo->readHandle.vnode, pAPI, pInfo); } else { ignoreFilterIdx = true; } @@ -2991,7 +2978,8 @@ static void destroyTagScanOperatorInfo(void* param) { if (pInfo->pCtbCursor != NULL) { pInfo->pStorageAPI->metaFn.closeCtbCursor(pInfo->pCtbCursor, 1); } - + taosHashCleanup(pInfo->filterCtx.colHash); + taosArrayDestroy(pInfo->filterCtx.cInfoList); taosArrayDestroy(pInfo->aFilterIdxs); 
taosArrayDestroyEx(pInfo->aUidTags, tagScanFreeUidTag); @@ -3001,8 +2989,9 @@ static void destroyTagScanOperatorInfo(void* param) { taosMemoryFreeClear(param); } -SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode, +SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pTagScanNode, STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, SExecTaskInfo* pTaskInfo) { + SScanPhysiNode* pPhyNode = (STagScanPhysiNode*)pTagScanNode; STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { @@ -3040,11 +3029,16 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi initResultSizeInfo(&pOperator->resultInfo, 4096); blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity); - if (pPhyNode->onlyMetaCtbIdx) { + if (pTagScanNode->onlyMetaCtbIdx) { pInfo->aUidTags = taosArrayInit(pOperator->resultInfo.capacity, sizeof(STUidTagInfo)); pInfo->aFilterIdxs = taosArrayInit(pOperator->resultInfo.capacity, sizeof(int32_t)); + pInfo->filterCtx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK); + pInfo->filterCtx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo)); + if (pInfo->pTagCond != NULL) { + nodesRewriteExprPostOrder(&pTagCond, tagScanRewriteTagColumn, (void*)&pInfo->filterCtx); + } } - __optr_fn_t tagScanNextFn = (pPhyNode->onlyMetaCtbIdx) ? doTagScanFromCtbIdx : doTagScanFromMetaEntry; + __optr_fn_t tagScanNextFn = (pTagScanNode->onlyMetaCtbIdx) ? 
doTagScanFromCtbIdx : doTagScanFromMetaEntry; pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, tagScanNextFn, NULL, destroyTagScanOperatorInfo, optrDefaultBufFn, NULL); diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 965af41fa7..d3cbaac5e1 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -564,14 +564,7 @@ static int32_t physiScanCopy(const SScanPhysiNode* pSrc, SScanPhysiNode* pDst) { } static int32_t physiTagScanCopy(const STagScanPhysiNode* pSrc, STagScanPhysiNode* pDst) { - COPY_BASE_OBJECT_FIELD(node, physiNodeCopy); - CLONE_NODE_LIST_FIELD(pScanCols); - CLONE_NODE_LIST_FIELD(pScanPseudoCols); - COPY_SCALAR_FIELD(uid); - COPY_SCALAR_FIELD(suid); - COPY_SCALAR_FIELD(tableType); - COPY_OBJECT_FIELD(tableName, sizeof(SName)); - COPY_SCALAR_FIELD(groupOrderScan); + COPY_BASE_OBJECT_FIELD(scan, physiScanCopy); COPY_SCALAR_FIELD(onlyMetaCtbIdx); return TSDB_CODE_SUCCESS; } diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 4dfc55c0fa..64a4e0e7d3 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1630,28 +1630,8 @@ static const char* jkTagScanPhysiOnlyMetaCtbIdx = "OnlyMetaCtbIdx"; static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) { const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; - int32_t code = physicPlanNodeToJson(pObj, pJson); - if (TSDB_CODE_SUCCESS == code) { - code = nodeListToJson(pJson, jkScanPhysiPlanScanCols, pNode->pScanCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = nodeListToJson(pJson, jkScanPhysiPlanScanPseudoCols, pNode->pScanPseudoCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableId, pNode->uid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanSTableId, pNode->suid); - } - if (TSDB_CODE_SUCCESS == 
code) { - code = tjsonAddIntegerToObject(pJson, jkScanPhysiPlanTableType, pNode->tableType); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddObject(pJson, jkScanPhysiPlanTableName, nameToJson, &pNode->tableName); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonAddBoolToObject(pJson, jkScanPhysiPlanGroupOrderScan, pNode->groupOrderScan); - } + int32_t code = physiScanNodeToJson(pObj, pJson); + if (TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkTagScanPhysiOnlyMetaCtbIdx, pNode->onlyMetaCtbIdx); } @@ -1661,28 +1641,8 @@ static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) { static int32_t jsonToPhysiTagScanNode(const SJson* pJson, void* pObj) { STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; - int32_t code = jsonToPhysicPlanNode(pJson, pObj); - if (TSDB_CODE_SUCCESS == code) { - code = jsonToNodeList(pJson, jkScanPhysiPlanScanCols, &pNode->pScanCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = jsonToNodeList(pJson, jkScanPhysiPlanScanPseudoCols, &pNode->pScanPseudoCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanTableId, &pNode->uid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetUBigIntValue(pJson, jkScanPhysiPlanSTableId, &pNode->suid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetTinyIntValue(pJson, jkScanPhysiPlanTableType, &pNode->tableType); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonToObject(pJson, jkScanPhysiPlanTableName, jsonToName, &pNode->tableName); - } - if (TSDB_CODE_SUCCESS == code) { - code = tjsonGetBoolValue(pJson, jkScanPhysiPlanGroupOrderScan, &pNode->groupOrderScan); - } + int32_t code = jsonToPhysiScanNode(pObj, pJson); + if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkTagScanPhysiOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); } diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 4d1120861d..cade77fc17 100644 --- 
a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -2004,42 +2004,15 @@ static int32_t msgToPhysiScanNode(STlvDecoder* pDecoder, void* pObj) { } enum { - PHY_TAG_SCAN_CODE_BASE_NODE = 1, - PHY_TAG_SCAN_CODE_SCAN_COLS, - PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, - PHY_TAG_SCAN_CODE_BASE_UID, - PHY_TAG_SCAN_CODE_BASE_SUID, - PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, - PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, - PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, + PHY_TAG_SCAN_CODE_SCAN = 1, PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX }; static int32_t physiTagScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) { const STagScanPhysiNode* pNode = (const STagScanPhysiNode*)pObj; - int32_t code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_BASE_NODE, physiNodeToMsg, &pNode->node); - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_COLS, nodeListToMsg, pNode->pScanCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS, nodeListToMsg, pNode->pScanPseudoCols); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_UID, pNode->uid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeU64(pEncoder, PHY_TAG_SCAN_CODE_BASE_SUID, pNode->suid); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeI8(pEncoder, PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE, pNode->tableType); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_BASE_TABLE_NAME, nameToMsg, &pNode->tableName); - } - if (TSDB_CODE_SUCCESS == code) { - code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN, pNode->groupOrderScan); - } + int32_t code = tlvEncodeObj(pEncoder, PHY_TAG_SCAN_CODE_SCAN, physiScanNodeToMsg, &pNode->scan); + if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeBool(pEncoder, PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX, pNode->onlyMetaCtbIdx); } @@ -2053,29 +2026,8 @@ static int32_t msgToPhysiTagScanNode(STlvDecoder* pDecoder, 
void* pObj) { STlv* pTlv = NULL; tlvForEach(pDecoder, pTlv, code) { switch (pTlv->type) { - case PHY_TAG_SCAN_CODE_BASE_NODE: - code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node); - break; - case PHY_TAG_SCAN_CODE_SCAN_COLS: - code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanCols); - break; - case PHY_TAG_SCAN_CODE_SCAN_PSEUDO_COLS: - code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanPseudoCols); - break; - case PHY_TAG_SCAN_CODE_BASE_UID: - code = tlvDecodeU64(pTlv, &pNode->uid); - break; - case PHY_TAG_SCAN_CODE_BASE_SUID: - code = tlvDecodeU64(pTlv, &pNode->suid); - break; - case PHY_TAG_SCAN_CODE_BASE_TABLE_TYPE: - code = tlvDecodeI8(pTlv, &pNode->tableType); - break; - case PHY_TAG_SCAN_CODE_BASE_TABLE_NAME: - code = tlvDecodeObjFromTlv(pTlv, msgToName, &pNode->tableName); - break; - case PHY_TAG_SCAN_CODE_BASE_GROUP_ORDER_SCAN: - code = tlvDecodeBool(pTlv, &pNode->groupOrderScan); + case PHY_TAG_SCAN_CODE_SCAN: + code = tlvDecodeObjFromTlv(pTlv, msgToPhysiScanNode, &pNode->scan); break; case PHY_TAG_SCAN_CODE_ONLY_META_CTB_IDX: code = tlvDecodeBool(pTlv, &pNode->onlyMetaCtbIdx); From 242bf77f4c1d6e811b5b41a872eff91f3a5c0985 Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 15 Aug 2023 16:13:51 +0800 Subject: [PATCH 133/147] chore: code optimization --- include/util/taoserror.h | 2 +- source/dnode/mnode/impl/src/mndDb.c | 2 +- source/dnode/mnode/impl/src/mndFunc.c | 2 +- source/dnode/mnode/impl/src/mndSma.c | 2 +- source/dnode/mnode/impl/src/mndStream.c | 2 +- source/util/src/terror.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 75ab916230..a5081f2c7d 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -191,6 +191,7 @@ int32_t* taosGetErrno(); // #define TSDB_CODE_MND_FAILED_TO_CREATE_DIR TAOS_DEF_ERROR_CODE(0, 0x0313) // 2.x // #define TSDB_CODE_MND_FAILED_TO_INIT_STEP TAOS_DEF_ERROR_CODE(0, 0x0314) // 2.x #define 
TSDB_CODE_MND_USER_DISABLED TAOS_DEF_ERROR_CODE(0, 0x0315) +#define TSDB_CODE_MND_INVALID_PLATFORM TAOS_DEF_ERROR_CODE(0, 0x0316) // mnode-sdb #define TSDB_CODE_SDB_OBJ_ALREADY_THERE TAOS_DEF_ERROR_CODE(0, 0x0320) // internal @@ -707,7 +708,6 @@ int32_t* taosGetErrno(); #define TSDB_CODE_PAR_INVALID_OPTR_USAGE TAOS_DEF_ERROR_CODE(0, 0x2667) #define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2668) #define TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED TAOS_DEF_ERROR_CODE(0, 0x2669) -#define TSDB_CODE_PAR_INVALID_PLATFORM TAOS_DEF_ERROR_CODE(0, 0x2670) #define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF) //planner diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index ada80b8370..4f7e80c0a3 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -668,7 +668,7 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) { } #ifdef WINDOWS if (taosArrayGetSize(createReq.pRetensions) > 0) { - code = TSDB_CODE_PAR_INVALID_PLATFORM; + terrno = TSDB_CODE_MND_INVALID_PLATFORM; goto _OVER; } #endif diff --git a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c index dc75a311e7..5eb7abf026 100644 --- a/source/dnode/mnode/impl/src/mndFunc.c +++ b/source/dnode/mnode/impl/src/mndFunc.c @@ -360,7 +360,7 @@ static int32_t mndProcessCreateFuncReq(SRpcMsg *pReq) { goto _OVER; } #ifdef WINDOWS - terrno = TSDB_CODE_PAR_INVALID_PLATFORM; + terrno = TSDB_CODE_MND_INVALID_PLATFORM; goto _OVER; #endif mInfo("func:%s, start to create, size:%d", createReq.name, createReq.codeLen); diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index 55169a5d56..e186a8742f 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -706,7 +706,7 @@ static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq) { goto _OVER; } #ifdef WINDOWS - terrno = TSDB_CODE_PAR_INVALID_PLATFORM; + terrno = 
TSDB_CODE_MND_INVALID_PLATFORM; goto _OVER; #endif mInfo("sma:%s, start to create", createReq.name); diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 7fd2444ab2..427a52af3b 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -693,7 +693,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) { goto _OVER; } #ifdef WINDOWS - terrno = TSDB_CODE_PAR_INVALID_PLATFORM; + terrno = TSDB_CODE_MND_INVALID_PLATFORM; goto _OVER; #endif mInfo("stream:%s, start to create, sql:%s", createStreamReq.name, createStreamReq.sql); diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 74352f2799..466b9985e7 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -151,6 +151,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SHOWOBJ, "Data expired") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_QUERY_ID, "Invalid query id") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CONN_ID, "Invalid connection id") TAOS_DEFINE_ERROR(TSDB_CODE_MND_USER_DISABLED, "User is disabled") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_PLATFORM, "Unsupported feature on this platform") // mnode-sdb TAOS_DEFINE_ERROR(TSDB_CODE_SDB_OBJ_ALREADY_THERE, "Object already there") @@ -570,7 +571,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_GET_META_ERROR, "Fail to get table i TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, "Not unique table/alias") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED_FUNC, "System table not allowed") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SYSTABLE_NOT_ALLOWED, "System table not allowed") -TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_PLATFORM, "Unsupported feature on this platformXX") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error") //planner From 7d1e4a9894c7c66b924a5dcbceecb708058e79f8 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 15 Aug 2023 08:25:33 +0000 Subject: [PATCH 134/147] rebuild index at tag0 --- source/common/src/tdatablock.c | 78 
+++++++++++++++++----------------- tests/parallel_test/cases.task | 2 +- 2 files changed, 41 insertions(+), 39 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 5188b1e27c..9f30d04b74 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -26,7 +26,7 @@ int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRo if (pColumnInfoData->reassigned) { int32_t totalSize = 0; for (int32_t row = 0; row < numOfRows; ++row) { - char* pColData = pColumnInfoData->pData + pColumnInfoData->varmeta.offset[row]; + char* pColData = pColumnInfoData->pData + pColumnInfoData->varmeta.offset[row]; int32_t colSize = 0; if (pColumnInfoData->info.type == TSDB_DATA_TYPE_JSON) { colSize = getJsonValueLen(pColData); @@ -142,7 +142,8 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const return 0; } -int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData) { +int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, + const char* pData) { int32_t type = pColumnInfoData->info.type; if (IS_VAR_DATA_TYPE(type)) { int32_t dataLen = 0; @@ -164,7 +165,6 @@ int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, return 0; } - static int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) { if (!IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { return TSDB_CODE_SUCCESS; @@ -188,16 +188,17 @@ static int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) } static int32_t doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t currentRow, const char* pData, - int32_t itemLen, int32_t numOfRows, bool trimValue) { + int32_t itemLen, int32_t numOfRows, bool trimValue) { if (pColumnInfoData->info.bytes < itemLen) { - uWarn("column/tag actual data len %d is bigger than schema len %d, trim it:%d", itemLen, 
pColumnInfoData->info.bytes, trimValue); + uWarn("column/tag actual data len %d is bigger than schema len %d, trim it:%d", itemLen, + pColumnInfoData->info.bytes, trimValue); if (trimValue) { itemLen = pColumnInfoData->info.bytes; } else { return TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER; } } - + size_t start = 1; // the first item @@ -230,8 +231,8 @@ static int32_t doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t cur return TSDB_CODE_SUCCESS; } -int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, - uint32_t numOfRows, bool trimValue) { +int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows, + bool trimValue) { int32_t len = pColumnInfoData->info.bytes; if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { len = varDataTLen(pData); @@ -262,7 +263,7 @@ static void doBitmapMerge(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, c uint8_t* p = (uint8_t*)pSource->nullbitmap; pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] &= (0B11111111 << shiftBits); // clear remind bits - pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] |= (p[0] >> remindBits); // copy remind bits + pColumnInfoData->nullbitmap[BitmapLen(numOfRow1) - 1] |= (p[0] >> remindBits); // copy remind bits if (BitmapLen(numOfRow1) == BitmapLen(total)) { return; @@ -350,7 +351,7 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int pColumnInfoData->pData = tmp; if (BitmapLen(numOfRow1) < BitmapLen(finalNumOfRows)) { - char* btmp = taosMemoryRealloc(pColumnInfoData->nullbitmap, BitmapLen(finalNumOfRows)); + char* btmp = taosMemoryRealloc(pColumnInfoData->nullbitmap, BitmapLen(finalNumOfRows)); if (btmp == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } @@ -622,7 +623,7 @@ int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock) { if (pCol->reassigned && IS_VAR_DATA_TYPE(pCol->info.type)) { for (int32_t row = 0; row < numOfRows; ++row) { - char* pColData 
= pCol->pData + pCol->varmeta.offset[row]; + char* pColData = pCol->pData + pCol->varmeta.offset[row]; int32_t colSize = 0; if (pCol->info.type == TSDB_DATA_TYPE_JSON) { colSize = getJsonValueLen(pColData); @@ -698,8 +699,7 @@ int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) { return TSDB_CODE_SUCCESS; } -static bool colDataIsNNull(const SColumnInfoData* pColumnInfoData, int32_t startIndex, - uint32_t nRows) { +static bool colDataIsNNull(const SColumnInfoData* pColumnInfoData, int32_t startIndex, uint32_t nRows) { if (!pColumnInfoData->hasNull) { return false; } @@ -880,7 +880,6 @@ int32_t dataBlockCompar(const void* p1, const void* p2, const void* param) { } static int32_t blockDataAssign(SColumnInfoData* pCols, const SSDataBlock* pDataBlock, const int32_t* index) { - size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData* pDst = &pCols[i]; @@ -1131,6 +1130,7 @@ static int32_t doEnsureCapacity(SColumnInfoData* pColumn, const SDataBlockInfo* if (tmp == NULL) { return TSDB_CODE_OUT_OF_MEMORY; } + // memset(tmp, 0, numOfRows * pColumn->info.bytes); // copy back the existed data if (pColumn->pData != NULL) { @@ -1474,8 +1474,8 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize, int int end = nRows; while (start <= end) { int mid = start + (end - start) / 2; - //data size + var data type columns offset + fixed data type columns bitmap len - int midSize = rowSize * mid + numVarCols * sizeof(int32_t) * mid + numFixCols * BitmapLen(mid); + // data size + var data type columns offset + fixed data type columns bitmap len + int midSize = rowSize * mid + numVarCols * sizeof(int32_t) * mid + numFixCols * BitmapLen(mid); if (midSize > payloadSize) { result = mid; end = mid - 1; @@ -1669,7 +1669,7 @@ int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock) { if (pColData->reassigned && IS_VAR_DATA_TYPE(pColData->info.type)) { for (int32_t row = 0; row < rows; ++row) { 
- char* pData = pColData->pData + pColData->varmeta.offset[row]; + char* pData = pColData->pData + pColData->varmeta.offset[row]; int32_t colSize = 0; if (pColData->info.type == TSDB_DATA_TYPE_JSON) { colSize = getJsonValueLen(pData); @@ -1772,7 +1772,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) { // for debug char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) { - int32_t size = 2048*1024; + int32_t size = 2048 * 1024; *pDataBuf = taosMemoryCalloc(size, 1); char* dumpBuf = *pDataBuf; char pBuf[128] = {0}; @@ -1780,8 +1780,8 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) int32_t rows = pDataBlock->info.rows; int32_t len = 0; len += snprintf(dumpBuf + len, size - len, - "===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%" PRId64 - "|rows:%" PRId64 "|version:%" PRIu64 "|cal start:%" PRIu64 "|cal end:%" PRIu64 "|tbl:%s\n", + "===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%" PRId64 "|rows:%" PRId64 + "|version:%" PRIu64 "|cal start:%" PRIu64 "|cal end:%" PRIu64 "|tbl:%s\n", flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.id.groupId, pDataBlock->info.id.uid, pDataBlock->info.rows, pDataBlock->info.version, pDataBlock->info.calWin.skey, pDataBlock->info.calWin.ekey, pDataBlock->info.parTbName); @@ -2156,21 +2156,21 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { data += metaSize; dataLen += metaSize; - if (pColRes->reassigned && IS_VAR_DATA_TYPE(pColRes->info.type)) { - colSizes[col] = 0; - for (int32_t row = 0; row < numOfRows; ++row) { - char* pColData = pColRes->pData + pColRes->varmeta.offset[row]; - int32_t colSize = 0; - if (pColRes->info.type == TSDB_DATA_TYPE_JSON) { - colSize = getJsonValueLen(pColData); - } else { - colSize = varDataTLen(pColData); - } - colSizes[col] += colSize; - dataLen += colSize; - memmove(data, pColData, colSize); - data += colSize; + if 
(pColRes->reassigned && IS_VAR_DATA_TYPE(pColRes->info.type)) { + colSizes[col] = 0; + for (int32_t row = 0; row < numOfRows; ++row) { + char* pColData = pColRes->pData + pColRes->varmeta.offset[row]; + int32_t colSize = 0; + if (pColRes->info.type == TSDB_DATA_TYPE_JSON) { + colSize = getJsonValueLen(pColData); + } else { + colSize = varDataTLen(pColData); } + colSizes[col] += colSize; + dataLen += colSize; + memmove(data, pColData, colSize); + data += colSize; + } } else { colSizes[col] = colDataGetLength(pColRes, numOfRows); dataLen += colSizes[col]; @@ -2181,7 +2181,8 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { } colSizes[col] = htonl(colSizes[col]); -// uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type, htonl(colSizes[col]), colSizes[col]); + // uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type, + // htonl(colSizes[col]), colSizes[col]); } *actualLen = dataLen; @@ -2283,7 +2284,7 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) { } void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList) { -// int32_t totalRows = pBlock->info.rows; + // int32_t totalRows = pBlock->info.rows; int32_t bmLen = BitmapLen(totalRows); char* pBitmap = NULL; int32_t maxRows = 0; @@ -2310,8 +2311,9 @@ void trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList if (colDataIsNull_var(pDst, j)) { colDataSetNull_var(pDst, numOfRows); } else { - // fix address sanitizer error. p1 may point to memory that will change during realloc of colDataSetVal, first copy it to p2 - char* p1 = colDataGetVarData(pDst, j); + // fix address sanitizer error. 
p1 may point to memory that will change during realloc of colDataSetVal, first + // copy it to p2 + char* p1 = colDataGetVarData(pDst, j); int32_t len = 0; if (pDst->info.type == TSDB_DATA_TYPE_JSON) { len = getJsonValueLen(p1); diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index e81339d705..8345a1112e 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1205,7 +1205,7 @@ ,,y,script,./test.sh -f tsim/tag/tbNameIn.sim ,,y,script,./test.sh -f tmp/monitor.sim ,,y,script,./test.sh -f tsim/tagindex/add_index.sim -,,y,script,./test.sh -f tsim/tagindex/sma_and_tag_index.sim +,,n,script,./test.sh -f tsim/tagindex/sma_and_tag_index.sim #develop test From 1792bf306e9be53779425a59b60af7ef43242d96 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 15 Aug 2023 16:54:26 +0800 Subject: [PATCH 135/147] vnode: fix cos cache evicting --- source/dnode/vnode/src/vnd/vnodeCos.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/source/dnode/vnode/src/vnd/vnodeCos.c b/source/dnode/vnode/src/vnd/vnodeCos.c index 02021831bf..4c76538eb2 100644 --- a/source/dnode/vnode/src/vnd/vnodeCos.c +++ b/source/dnode/vnode/src/vnd/vnodeCos.c @@ -212,19 +212,20 @@ static int32_t evictFileCompareAsce(const void *pLeft, const void *pRight) { void s3EvictCache(const char *path, long object_size) { SDiskSize disk_size = {0}; - if (taosGetDiskSize((char *)path, &disk_size) < 0) { + char dir_name[TSDB_FILENAME_LEN] = "\0"; + + tstrncpy(dir_name, path, TSDB_FILENAME_LEN); + taosDirName(dir_name); + + if (taosGetDiskSize((char *)dir_name, &disk_size) < 0) { terrno = TAOS_SYSTEM_ERROR(errno); vError("failed to get disk:%s size since %s", path, terrstr()); return; } - if (object_size >= disk_size.avail + 1 << 30) { + if (object_size >= disk_size.avail - (1 << 30)) { // evict too old files // 1, list data files' atime under dir(path) - char dir_name[TSDB_FILENAME_LEN] = "\0"; - tstrncpy(dir_name, path, 
TSDB_FILENAME_LEN); - taosDirName(dir_name); - tdbDirPtr pDir = taosOpenDir(dir_name); if (pDir == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); @@ -236,9 +237,14 @@ void s3EvictCache(const char *path, long object_size) { char *name = taosGetDirEntryName(pDirEntry); if (!strncmp(name + strlen(name) - 5, ".data", 5)) { SEvictFile e_file = {0}; + char entry_name[TSDB_FILENAME_LEN] = "\0"; + int dir_len = strlen(dir_name); - tstrncpy(e_file.name, name, TSDB_FILENAME_LEN); - taosStatFile(name, &e_file.size, NULL, &e_file.atime); + memcpy(e_file.name, dir_name, dir_len); + e_file.name[dir_len] = '/'; + memcpy(e_file.name + dir_len + 1, name, strlen(name)); + + taosStatFile(e_file.name, &e_file.size, NULL, &e_file.atime); taosArrayPush(evict_files, &e_file); } From 450d7e2d3c6cdebf2dc6fbf2a279666e45efd576 Mon Sep 17 00:00:00 2001 From: slzhou Date: Tue, 15 Aug 2023 16:56:15 +0800 Subject: [PATCH 136/147] enhance: compilation error --- source/libs/nodes/src/nodesCodeFuncs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 64a4e0e7d3..48c9bf33dd 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -1641,7 +1641,7 @@ static int32_t physiTagScanNodeToJson(const void* pObj, SJson* pJson) { static int32_t jsonToPhysiTagScanNode(const SJson* pJson, void* pObj) { STagScanPhysiNode* pNode = (STagScanPhysiNode*)pObj; - int32_t code = jsonToPhysiScanNode(pObj, pJson); + int32_t code = jsonToPhysiScanNode(pJson, pObj); if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkTagScanPhysiOnlyMetaCtbIdx, &pNode->onlyMetaCtbIdx); From 49e4b11547c03cfd7ed5dc993d2fa6e42bab9a8b Mon Sep 17 00:00:00 2001 From: slzhou Date: Tue, 15 Aug 2023 17:00:53 +0800 Subject: [PATCH 137/147] fix: fix compilation error --- source/libs/executor/src/scanoperator.c | 2 +- source/libs/nodes/test/nodesCloneTest.cpp | 7 ++++--- 2 files changed, 5 
insertions(+), 4 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index ef28875be4..d7d97cc514 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2991,7 +2991,7 @@ static void destroyTagScanOperatorInfo(void* param) { SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pTagScanNode, STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, SExecTaskInfo* pTaskInfo) { - SScanPhysiNode* pPhyNode = (STagScanPhysiNode*)pTagScanNode; + SScanPhysiNode* pPhyNode = (SScanPhysiNode*)pTagScanNode; STagScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STagScanInfo)); SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo)); if (pInfo == NULL || pOperator == NULL) { diff --git a/source/libs/nodes/test/nodesCloneTest.cpp b/source/libs/nodes/test/nodesCloneTest.cpp index e1e99abab3..8b8893d317 100644 --- a/source/libs/nodes/test/nodesCloneTest.cpp +++ b/source/libs/nodes/test/nodesCloneTest.cpp @@ -199,9 +199,10 @@ TEST_F(NodesCloneTest, physiScan) { ASSERT_EQ(nodeType(pSrc), nodeType(pDst)); STagScanPhysiNode* pSrcNode = (STagScanPhysiNode*)pSrc; STagScanPhysiNode* pDstNode = (STagScanPhysiNode*)pDst; - ASSERT_EQ(pSrcNode->uid, pDstNode->uid); - ASSERT_EQ(pSrcNode->suid, pDstNode->suid); - ASSERT_EQ(pSrcNode->tableType, pDstNode->tableType); + ASSERT_EQ(pSrcNode->scan.uid, pDstNode->scan.uid); + ASSERT_EQ(pSrcNode->scan.suid, pDstNode->scan.suid); + ASSERT_EQ(pSrcNode->scan.tableType, pDstNode->scan.tableType); + ASSERT_EQ(pSrcNode->onlyMetaCtbIdx, pDstNode->onlyMetaCtbIdx); }); std::unique_ptr srcNode(nullptr, nodesDestroyNode); From ccce04ceb98ea22c9e6d67ced1237c10b2b0bda2 Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 15 Aug 2023 17:01:45 +0800 Subject: [PATCH 138/147] enh: disable udf on windows --- source/dnode/mgmt/mgmt_dnode/src/dmInt.c | 2 +- source/dnode/mgmt/mgmt_qnode/src/qmInt.c | 2 +- 
source/dnode/mgmt/mgmt_snode/src/smInt.c | 2 +- source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 2 +- source/dnode/mgmt/node_mgmt/src/dmEnv.c | 4 ++-- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c index f59d04e618..ae62c74e03 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c @@ -59,7 +59,7 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { return -1; } -#ifdef WINDOWS +#ifndef WINDOWS if (udfStartUdfd(pMgmt->pData->dnodeId) != 0) { dError("failed to start udfd"); } diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmInt.c b/source/dnode/mgmt/mgmt_qnode/src/qmInt.c index 82bc2f36f0..657f15920a 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmInt.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmInt.c @@ -57,7 +57,7 @@ static int32_t qmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("qnode-impl", "initialized"); -#ifdef WINDOWS +#ifndef WINDOWS if (udfcOpen() != 0) { dError("qnode can not open udfc"); qmClose(pMgmt); diff --git a/source/dnode/mgmt/mgmt_snode/src/smInt.c b/source/dnode/mgmt/mgmt_snode/src/smInt.c index 7607fcac61..58d4b6139b 100644 --- a/source/dnode/mgmt/mgmt_snode/src/smInt.c +++ b/source/dnode/mgmt/mgmt_snode/src/smInt.c @@ -65,7 +65,7 @@ int32_t smOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("snode-worker", "initialized"); -#ifdef WINDOWS +#ifndef WINDOWS if (udfcOpen() != 0) { dError("failed to open udfc in snode"); smClose(pMgmt); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 872577cf28..2dd0130d56 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -571,7 +571,7 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("vnode-vnodes", "initialized"); -#ifdef WINDOWS +#ifndef WINDOWS if 
(udfcOpen() != 0) { dError("failed to open udfc in vnode"); goto _OVER; diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index a8f871dc96..f7e429f938 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -198,10 +198,10 @@ void dmCleanup() { monCleanup(); syncCleanUp(); walCleanUp(); -#ifdef WINDOWS +#ifndef WINDOWS udfcClose(); udfStopUdfd(); -#endif +#endif taosStopCacheRefreshWorker(); dmDiskClose(); dInfo("dnode env is cleaned up"); From adaaa922beaac65e7f4a6c83fda69a4213f5db36 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 15 Aug 2023 17:47:19 +0800 Subject: [PATCH 139/147] fix bugs --- source/libs/function/src/builtinsimpl.c | 35 ++++++++++++++++++------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 631f943915..9d4868c9cd 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -5819,6 +5819,7 @@ int32_t irateFunction(SqlFunctionCtx* pCtx) { if (INT64_MIN == pRateInfo->lastKey) { pRateInfo->lastValue = v; pRateInfo->lastKey = tsList[i]; + pRateInfo->hasResult = 1; continue; } @@ -5870,36 +5871,50 @@ static double doCalcRate(const SRateInfo* pRateInfo, double tickPerSec) { return (duration > 0) ? ((double)diff) / (duration / tickPerSec) : 0.0; } -static void irateTransferInfoImpl(TSKEY inputKey, SRateInfo* pInput, SRateInfo* pOutput) { +static void irateTransferInfoImpl(TSKEY inputKey, SRateInfo* pInput, SRateInfo* pOutput, bool isFirstKey) { if (inputKey > pOutput->lastKey) { - pOutput->firstKey = pOutput->lastKey; - pOutput->lastKey = pInput->firstKey; + pOutput->firstKey = pOutput->lastKey; + pOutput->firstValue = pOutput->lastValue; - pOutput->firstValue = pOutput->lastValue; - pOutput->lastValue = pInput->firstValue; + pOutput->lastKey = isFirstKey ? 
pInput->firstKey : pInput->lastKey; + pOutput->lastValue = isFirstKey ? pInput->firstValue : pInput->lastValue; } else if ((inputKey < pOutput->lastKey) && (inputKey > pOutput->firstKey)) { - pOutput->firstKey = pOutput->lastKey; - pOutput->firstValue = pOutput->lastValue; + pOutput->firstKey = isFirstKey ? pInput->firstKey : pInput->lastKey; + pOutput->firstValue = isFirstKey ? pInput->firstValue : pInput->lastValue; } else { // inputKey < pOutput->firstKey } } +static void irateCopyInfo(SRateInfo* pInput, SRateInfo* pOutput) { + pOutput->firstKey = pInput->firstKey; + pOutput->lastKey = pInput->lastKey; + + pOutput->firstValue = pInput->firstValue; + pOutput->lastValue = pInput->lastValue; +} + static int32_t irateTransferInfo(SRateInfo* pInput, SRateInfo* pOutput) { - pOutput->hasResult = pInput->hasResult; if (pInput->firstKey == pOutput->firstKey || pInput->firstKey == pOutput->lastKey || pInput->lastKey == pOutput->firstKey || pInput->lastKey == pOutput->lastKey) { return TSDB_CODE_FUNC_DUP_TIMESTAMP; } + if (pOutput->hasResult == 0) { + irateCopyInfo(pInput, pOutput); + pOutput->hasResult = pInput->hasResult; + return TSDB_CODE_SUCCESS; + } + if (pInput->firstKey != INT64_MIN) { - irateTransferInfoImpl(pInput->firstKey, pInput, pOutput); + irateTransferInfoImpl(pInput->firstKey, pInput, pOutput, true); } if (pInput->lastKey != INT64_MIN) { - irateTransferInfoImpl(pInput->lastKey, pInput, pOutput); + irateTransferInfoImpl(pInput->lastKey, pInput, pOutput, false); } + pOutput->hasResult = pInput->hasResult; return TSDB_CODE_SUCCESS; } From 3c6ddad61114369819dc57bb2b699f1534ae63e5 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Tue, 15 Aug 2023 17:58:12 +0800 Subject: [PATCH 140/147] fix bugs --- source/libs/function/src/builtinsimpl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 9d4868c9cd..bcbb3af950 100644 --- 
a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -5895,8 +5895,8 @@ static void irateCopyInfo(SRateInfo* pInput, SRateInfo* pOutput) { } static int32_t irateTransferInfo(SRateInfo* pInput, SRateInfo* pOutput) { - if (pInput->firstKey == pOutput->firstKey || pInput->firstKey == pOutput->lastKey || - pInput->lastKey == pOutput->firstKey || pInput->lastKey == pOutput->lastKey) { + if ((pInput->firstKey != INT64_MIN && (pInput->firstKey == pOutput->firstKey || pInput->firstKey == pOutput->lastKey)) || + (pInput->lastKey != INT64_MIN && (pInput->lastKey == pOutput->firstKey || pInput->lastKey == pOutput->lastKey))) { return TSDB_CODE_FUNC_DUP_TIMESTAMP; } From 3b69736b29baff03808b1f0281728520e8be0df6 Mon Sep 17 00:00:00 2001 From: kailixu Date: Tue, 15 Aug 2023 19:59:13 +0800 Subject: [PATCH 141/147] fix: set precision of interval with value from table scan node --- source/libs/executor/src/executil.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index aa0c7945b0..0f62b4f0a0 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -1677,6 +1677,7 @@ SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) { .intervalUnit = pTableScanNode->intervalUnit, .slidingUnit = pTableScanNode->slidingUnit, .offset = pTableScanNode->offset, + .precision = pTableScanNode->scan.node.pOutputDataBlockDesc->precision, }; return interval; From f2d8078d6619d61b9c14af5f4a24e62b08535544 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Wed, 16 Aug 2023 08:10:39 +0800 Subject: [PATCH 142/147] Update 25-grant.md --- docs/zh/12-taos-sql/25-grant.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/12-taos-sql/25-grant.md b/docs/zh/12-taos-sql/25-grant.md index a9c3910500..d53f951e67 100644 --- a/docs/zh/12-taos-sql/25-grant.md +++ 
b/docs/zh/12-taos-sql/25-grant.md @@ -4,7 +4,7 @@ title: 权限管理 description: 企业版中才具有的权限管理功能 --- -本节讲述如何在 TDengine 中进行权限管理的相关操作。 +本节讲述如何在 TDengine 中进行权限管理的相关操作。权限管理是 TDengine 企业版的特有功能,本节只列举了一些基本的权限管理功能作为示例,更丰富的权限管理请联系 TDengine 销售或市场团队。 ## 创建用户 From 6d80f21ba3ce5a4eb6eac61840ddbb9c7fe85987 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Wed, 16 Aug 2023 08:14:07 +0800 Subject: [PATCH 143/147] Update 25-grant.md --- docs/en/12-taos-sql/25-grant.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md index c214e11876..139b2729ae 100644 --- a/docs/en/12-taos-sql/25-grant.md +++ b/docs/en/12-taos-sql/25-grant.md @@ -4,7 +4,7 @@ sidebar_label: Access Control description: This document describes how to manage users and permissions in TDengine. --- -This document describes how to manage permissions in TDengine. +This document describes how to manage permissions in TDengine. User and Access control is a distingguished feature of TDengine enterprise edition. In this section, only the most fundamental functionalities of user and access control are demonstrated. To get the full knowledge of user and access control, please contact the TDengine team. ## Create a User From 6b961fc04866e187bda66163d4073c68ee9e3037 Mon Sep 17 00:00:00 2001 From: wade zhang <95411902+gccgdb1234@users.noreply.github.com> Date: Wed, 16 Aug 2023 08:15:25 +0800 Subject: [PATCH 144/147] Update 25-grant.md --- docs/en/12-taos-sql/25-grant.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md index 139b2729ae..5ebed12b59 100644 --- a/docs/en/12-taos-sql/25-grant.md +++ b/docs/en/12-taos-sql/25-grant.md @@ -4,7 +4,7 @@ sidebar_label: Access Control description: This document describes how to manage users and permissions in TDengine. --- -This document describes how to manage permissions in TDengine. 
User and Access control is a distingguished feature of TDengine enterprise edition. In this section, only the most fundamental functionalities of user and access control are demonstrated. To get the full knowledge of user and access control, please contact the TDengine team. +User and Access control is a distingguished feature of TDengine enterprise edition. In this section, only the most fundamental functionalities of user and access control are demonstrated. To get the full knowledge of user and access control, please contact the TDengine team. ## Create a User From b54d6e298250fb62459903ca8c741036e180986b Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 16 Aug 2023 09:55:30 +0800 Subject: [PATCH 145/147] chore: set default value of tsStartUdfd false on windows --- source/common/src/tglobal.c | 6 +++++- source/dnode/mgmt/mgmt_dnode/src/dmInt.c | 2 -- source/dnode/mgmt/mgmt_qnode/src/qmInt.c | 2 -- source/dnode/mgmt/mgmt_snode/src/smInt.c | 2 -- source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 2 -- source/dnode/mgmt/node_mgmt/src/dmEnv.c | 2 -- 6 files changed, 5 insertions(+), 11 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index a772efc33c..da2917f144 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -216,7 +216,11 @@ uint32_t tsCurRange = 100; // range char tsCompressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR // udf -bool tsStartUdfd = true; +#ifdef WINDOWS +bool tsStartUdfd = false; +#else +bool tsStartUdfd = true; +#endif // wal int64_t tsWalFsyncDataSizeLimit = (100 * 1024 * 1024L); diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c index ae62c74e03..09783a5ea9 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmInt.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmInt.c @@ -59,11 +59,9 @@ static int32_t dmOpenMgmt(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { return -1; } -#ifndef WINDOWS if (udfStartUdfd(pMgmt->pData->dnodeId) != 0) { 
dError("failed to start udfd"); } -#endif pOutput->pMgmt = pMgmt; return 0; diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmInt.c b/source/dnode/mgmt/mgmt_qnode/src/qmInt.c index 657f15920a..3b425a0b49 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmInt.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmInt.c @@ -57,13 +57,11 @@ static int32_t qmOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("qnode-impl", "initialized"); -#ifndef WINDOWS if (udfcOpen() != 0) { dError("qnode can not open udfc"); qmClose(pMgmt); return -1; } -#endif if (qmStartWorker(pMgmt) != 0) { dError("failed to start qnode worker since %s", terrstr()); diff --git a/source/dnode/mgmt/mgmt_snode/src/smInt.c b/source/dnode/mgmt/mgmt_snode/src/smInt.c index 58d4b6139b..e222349767 100644 --- a/source/dnode/mgmt/mgmt_snode/src/smInt.c +++ b/source/dnode/mgmt/mgmt_snode/src/smInt.c @@ -65,13 +65,11 @@ int32_t smOpen(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("snode-worker", "initialized"); -#ifndef WINDOWS if (udfcOpen() != 0) { dError("failed to open udfc in snode"); smClose(pMgmt); return -1; } -#endif pOutput->pMgmt = pMgmt; return 0; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 2dd0130d56..0ff2537e4c 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -571,12 +571,10 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { } tmsgReportStartup("vnode-vnodes", "initialized"); -#ifndef WINDOWS if (udfcOpen() != 0) { dError("failed to open udfc in vnode"); goto _OVER; } -#endif code = 0; diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index f7e429f938..65683e5061 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -198,10 +198,8 @@ void dmCleanup() { monCleanup(); syncCleanUp(); walCleanUp(); -#ifndef WINDOWS udfcClose(); 
udfStopUdfd(); -#endif taosStopCacheRefreshWorker(); dmDiskClose(); dInfo("dnode env is cleaned up"); From 777ece27ae1c47aec575791186eff416758ecc09 Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 16 Aug 2023 09:56:25 +0800 Subject: [PATCH 146/147] chore: code format --- source/dnode/mgmt/node_mgmt/src/dmEnv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index 65683e5061..a34002161d 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -198,7 +198,7 @@ void dmCleanup() { monCleanup(); syncCleanUp(); walCleanUp(); - udfcClose(); + udfcClose(); udfStopUdfd(); taosStopCacheRefreshWorker(); dmDiskClose(); From e587cc50e64af3ff7b3a7912da655f073a5fb7b6 Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Thu, 10 Aug 2023 11:09:21 +0800 Subject: [PATCH 147/147] feat: optimize select agg_func partition by tag slimit --- include/libs/nodes/plannodes.h | 2 + source/libs/executor/src/executor.c | 9 +++- source/libs/nodes/src/nodesCodeFuncs.c | 14 ++++++ source/libs/nodes/src/nodesMsgFuncs.c | 12 +++++ source/libs/planner/inc/planInt.h | 10 +++- source/libs/planner/src/planOptimizer.c | 34 ++++++-------- source/libs/planner/src/planPhysiCreater.c | 10 +++- source/libs/planner/src/planSpliter.c | 10 +++- source/libs/planner/src/planUtil.c | 53 ++++++++++++++++++++-- 9 files changed, 124 insertions(+), 30 deletions(-) diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 063318332a..4b6704df43 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -603,6 +603,8 @@ typedef struct SSubplan { SNode* pTagCond; SNode* pTagIndexCond; bool showRewrite; + int32_t rowsThreshold; + bool dynamicRowThreshold; } SSubplan; typedef enum EExplainMode { EXPLAIN_MODE_DISABLE = 1, EXPLAIN_MODE_STATIC, EXPLAIN_MODE_ANALYZE } EExplainMode; diff --git 
a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 05767db286..b101a5916c 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -589,6 +589,10 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo int64_t st = taosGetTimestampUs(); int32_t blockIndex = 0; + int32_t rowsThreshold = pTaskInfo->pSubplan->rowsThreshold; + if (!pTaskInfo->pSubplan->dynamicRowThreshold || 4096 <= pTaskInfo->pSubplan->rowsThreshold) { + rowsThreshold = 4096; + } while ((pRes = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot)) != NULL) { SSDataBlock* p = NULL; if (blockIndex >= taosArrayGetSize(pTaskInfo->pResultBlockList)) { @@ -606,10 +610,13 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo ASSERT(p->info.rows > 0); taosArrayPush(pResList, &p); - if (current >= 4096) { + if (current >= rowsThreshold) { break; } } + if (pTaskInfo->pSubplan->dynamicRowThreshold) { + pTaskInfo->pSubplan->rowsThreshold -= current; + } *hasMore = (pRes != NULL); uint64_t el = (taosGetTimestampUs() - st); diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index f25616065e..dc53dbb230 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -2814,6 +2814,8 @@ static const char* jkSubplanDataSink = "DataSink"; static const char* jkSubplanTagCond = "TagCond"; static const char* jkSubplanTagIndexCond = "TagIndexCond"; static const char* jkSubplanShowRewrite = "ShowRewrite"; +static const char* jkSubplanRowsThreshold = "RowThreshold"; +static const char* jkSubplanDynamicRowsThreshold = "DyRowThreshold"; static int32_t subplanToJson(const void* pObj, SJson* pJson) { const SSubplan* pNode = (const SSubplan*)pObj; @@ -2852,6 +2854,12 @@ static int32_t subplanToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkSubplanShowRewrite, 
pNode->showRewrite); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkSubplanRowsThreshold, pNode->rowsThreshold); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkSubplanDynamicRowsThreshold, pNode->dynamicRowThreshold); + } return code; } @@ -2893,6 +2901,12 @@ static int32_t jsonToSubplan(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkSubplanShowRewrite, &pNode->showRewrite); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetIntValue(pJson, jkSubplanRowsThreshold, &pNode->rowsThreshold); + } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkSubplanDynamicRowsThreshold, &pNode->dynamicRowThreshold); + } return code; } diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c index 20e829766d..37315c9f42 100644 --- a/source/libs/nodes/src/nodesMsgFuncs.c +++ b/source/libs/nodes/src/nodesMsgFuncs.c @@ -3538,6 +3538,12 @@ static int32_t subplanInlineToMsg(const void* pObj, STlvEncoder* pEncoder) { if (TSDB_CODE_SUCCESS == code) { code = tlvEncodeValueBool(pEncoder, pNode->showRewrite); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeValueI32(pEncoder, pNode->rowsThreshold); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvEncodeValueBool(pEncoder, pNode->dynamicRowThreshold); + } return code; } @@ -3587,6 +3593,12 @@ static int32_t msgToSubplanInline(STlvDecoder* pDecoder, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tlvDecodeValueBool(pDecoder, &pNode->showRewrite); } + if (TSDB_CODE_SUCCESS == code) { + code = tlvDecodeValueI32(pDecoder, &pNode->rowsThreshold); + } + if (TSDB_CODE_SUCCESS == code) { + code = tlvDecodeValueBool(pDecoder, &pNode->dynamicRowThreshold); + } return code; } diff --git a/source/libs/planner/inc/planInt.h b/source/libs/planner/inc/planInt.h index 092fe17411..24d77cb9a4 100644 --- a/source/libs/planner/inc/planInt.h +++ 
b/source/libs/planner/inc/planInt.h @@ -43,8 +43,14 @@ int32_t splitLogicPlan(SPlanContext* pCxt, SLogicSubplan* pLogicSubplan); int32_t scaleOutLogicPlan(SPlanContext* pCxt, SLogicSubplan* pLogicSubplan, SQueryLogicPlan** pLogicPlan); int32_t createPhysiPlan(SPlanContext* pCxt, SQueryLogicPlan* pLogicPlan, SQueryPlan** pPlan, SArray* pExecNodeList); -bool isPartTableAgg(SAggLogicNode* pAgg); -bool isPartTableWinodw(SWindowLogicNode* pWindow); +bool isPartTableAgg(SAggLogicNode* pAgg); +bool isPartTagAgg(SAggLogicNode* pAgg); +bool isPartTableWinodw(SWindowLogicNode* pWindow); + +#define CLONE_LIMIT 1 +#define CLONE_SLIMIT 1 << 1 +#define CLONE_LIMIT_SLIMIT (CLONE_LIMIT | CLONE_SLIMIT) +bool cloneLimit(SLogicNode* pParent, SLogicNode* pChild, uint8_t cloneWhat); #ifdef __cplusplus } diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 16440be511..c73da5e19d 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -368,8 +368,8 @@ static void scanPathOptSetGroupOrderScan(SScanLogicNode* pScan) { if (pScan->node.pParent && nodeType(pScan->node.pParent) == QUERY_NODE_LOGIC_PLAN_AGG) { SAggLogicNode* pAgg = (SAggLogicNode*)pScan->node.pParent; - bool withSlimit = pAgg->node.pSlimit != NULL || (pAgg->node.pParent && pAgg->node.pParent->pSlimit); - if (withSlimit && isPartTableAgg(pAgg)) { + bool withSlimit = pAgg->node.pSlimit != NULL; + if (withSlimit && (isPartTableAgg(pAgg) || isPartTagAgg(pAgg))) { pScan->groupOrderScan = pAgg->node.forceCreateNonBlockingOptr = true; } } @@ -2698,39 +2698,31 @@ static void swapLimit(SLogicNode* pParent, SLogicNode* pChild) { pParent->pLimit = NULL; } -static void cloneLimit(SLogicNode* pParent, SLogicNode* pChild) { - SLimitNode* pLimit = NULL; - if (pParent->pLimit) { - pChild->pLimit = nodesCloneNode(pParent->pLimit); - pLimit = (SLimitNode*)pChild->pLimit; - pLimit->limit += pLimit->offset; - pLimit->offset = 0; - } - - if 
(pParent->pSlimit) { - pChild->pSlimit = nodesCloneNode(pParent->pSlimit); - pLimit = (SLimitNode*)pChild->pSlimit; - pLimit->limit += pLimit->offset; - pLimit->offset = 0; - } -} - static bool pushDownLimitHow(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPushTo); static bool pushDownLimitTo(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPushTo) { switch (nodeType(pNodeLimitPushTo)) { case QUERY_NODE_LOGIC_PLAN_WINDOW: { SWindowLogicNode* pWindow = (SWindowLogicNode*)pNodeLimitPushTo; if (pWindow->winType != WINDOW_TYPE_INTERVAL) break; - cloneLimit(pNodeWithLimit, pNodeLimitPushTo); + cloneLimit(pNodeWithLimit, pNodeLimitPushTo, CLONE_LIMIT_SLIMIT); return true; } case QUERY_NODE_LOGIC_PLAN_FILL: case QUERY_NODE_LOGIC_PLAN_SORT: { - cloneLimit(pNodeWithLimit, pNodeLimitPushTo); + cloneLimit(pNodeWithLimit, pNodeLimitPushTo, CLONE_LIMIT_SLIMIT); SNode* pChild = NULL; FOREACH(pChild, pNodeLimitPushTo->pChildren) { pushDownLimitHow(pNodeLimitPushTo, (SLogicNode*)pChild); } return true; } + case QUERY_NODE_LOGIC_PLAN_AGG: { + if (nodeType(pNodeWithLimit) == QUERY_NODE_LOGIC_PLAN_PROJECT && + (isPartTagAgg((SAggLogicNode*)pNodeLimitPushTo) || isPartTableAgg((SAggLogicNode*)pNodeLimitPushTo))) { + // when part by tag, slimit will be cloned to agg, and it will be pipelined. 
+ // The scan below will do scanning with group order + return cloneLimit(pNodeWithLimit, pNodeLimitPushTo, CLONE_SLIMIT); + } + break; + } case QUERY_NODE_LOGIC_PLAN_SCAN: if (nodeType(pNodeWithLimit) == QUERY_NODE_LOGIC_PLAN_PROJECT && pNodeWithLimit->pLimit) { swapLimit(pNodeWithLimit, pNodeLimitPushTo); diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 1b92dcd2e7..ee7aea9deb 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -872,12 +872,16 @@ static int32_t rewritePrecalcExpr(SPhysiPlanContext* pCxt, SNode* pNode, SNodeLi } static int32_t createAggPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SAggLogicNode* pAggLogicNode, - SPhysiNode** pPhyNode) { + SPhysiNode** pPhyNode, SSubplan* pSubPlan) { SAggPhysiNode* pAgg = (SAggPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pAggLogicNode, QUERY_NODE_PHYSICAL_PLAN_HASH_AGG); if (NULL == pAgg) { return TSDB_CODE_OUT_OF_MEMORY; } + if (pAgg->node.pSlimit) { + pSubPlan->dynamicRowThreshold = true; + pSubPlan->rowsThreshold = ((SLimitNode*)pAgg->node.pSlimit)->limit; + } pAgg->mergeDataBlock = (GROUP_ACTION_KEEP == pAggLogicNode->node.groupAction ? 
false : true); pAgg->groupKeyOptimized = pAggLogicNode->hasGroupKeyOptimized; @@ -1617,7 +1621,7 @@ static int32_t doCreatePhysiNode(SPhysiPlanContext* pCxt, SLogicNode* pLogicNode case QUERY_NODE_LOGIC_PLAN_JOIN: return createJoinPhysiNode(pCxt, pChildren, (SJoinLogicNode*)pLogicNode, pPhyNode); case QUERY_NODE_LOGIC_PLAN_AGG: - return createAggPhysiNode(pCxt, pChildren, (SAggLogicNode*)pLogicNode, pPhyNode); + return createAggPhysiNode(pCxt, pChildren, (SAggLogicNode*)pLogicNode, pPhyNode, pSubplan); case QUERY_NODE_LOGIC_PLAN_PROJECT: return createProjectPhysiNode(pCxt, pChildren, (SProjectLogicNode*)pLogicNode, pPhyNode); case QUERY_NODE_LOGIC_PLAN_EXCHANGE: @@ -1721,6 +1725,8 @@ static SSubplan* makeSubplan(SPhysiPlanContext* pCxt, SLogicSubplan* pLogicSubpl pSubplan->id = pLogicSubplan->id; pSubplan->subplanType = pLogicSubplan->subplanType; pSubplan->level = pLogicSubplan->level; + pSubplan->rowsThreshold = 4096; + pSubplan->dynamicRowThreshold = false; if (NULL != pCxt->pPlanCxt->pUser) { snprintf(pSubplan->user, sizeof(pSubplan->user), "%s", pCxt->pPlanCxt->pUser); } diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 3f6c73b4e5..868aee7147 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -867,8 +867,16 @@ static int32_t stbSplSplitAggNodeForPartTable(SSplitContext* pCxt, SStableSplitI static int32_t stbSplSplitAggNodeForCrossTable(SSplitContext* pCxt, SStableSplitInfo* pInfo) { SLogicNode* pPartAgg = NULL; int32_t code = stbSplCreatePartAggNode((SAggLogicNode*)pInfo->pSplitNode, &pPartAgg); + + if (TSDB_CODE_SUCCESS == code) { - code = stbSplCreateExchangeNode(pCxt, pInfo->pSplitNode, pPartAgg); + // if slimit was pushed down to agg, agg will be pipelined mode, add sort merge before parent agg + if ((SAggLogicNode*)pInfo->pSplitNode->pSlimit) + code = stbSplCreateMergeNode(pCxt, NULL, pInfo->pSplitNode, NULL, pPartAgg, true); + else + code = 
stbSplCreateExchangeNode(pCxt, pInfo->pSplitNode, pPartAgg); + } else { + nodesDestroyNode((SNode*)pPartAgg); } if (TSDB_CODE_SUCCESS == code) { code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren, diff --git a/source/libs/planner/src/planUtil.c b/source/libs/planner/src/planUtil.c index 88086cde1d..9febe102f6 100644 --- a/source/libs/planner/src/planUtil.c +++ b/source/libs/planner/src/planUtil.c @@ -349,7 +349,7 @@ static bool stbHasPartTbname(SNodeList* pPartKeys) { return false; } -static SNodeList* stbSplGetPartKeys(SLogicNode* pNode) { +static SNodeList* stbGetPartKeys(SLogicNode* pNode) { if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode)) { return ((SScanLogicNode*)pNode)->pGroupTags; } else if (QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode)) { @@ -367,11 +367,58 @@ bool isPartTableAgg(SAggLogicNode* pAgg) { return stbHasPartTbname(pAgg->pGroupKeys) && stbNotSystemScan((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0)); } - return stbHasPartTbname(stbSplGetPartKeys((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0))); + return stbHasPartTbname(stbGetPartKeys((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0))); +} + +static bool stbHasPartTag(SNodeList* pPartKeys) { + if (NULL == pPartKeys) { + return false; + } + SNode* pPartKey = NULL; + FOREACH(pPartKey, pPartKeys) { + if (QUERY_NODE_GROUPING_SET == nodeType(pPartKey)) { + pPartKey = nodesListGetNode(((SGroupingSetNode*)pPartKey)->pParameterList, 0); + } + if ((QUERY_NODE_FUNCTION == nodeType(pPartKey) && FUNCTION_TYPE_TAGS == ((SFunctionNode*)pPartKey)->funcType) || + (QUERY_NODE_COLUMN == nodeType(pPartKey) && COLUMN_TYPE_TAG == ((SColumnNode*)pPartKey)->colType)) { + return true; + } + } + return false; +} + +bool isPartTagAgg(SAggLogicNode* pAgg) { + if (1 != LIST_LENGTH(pAgg->node.pChildren)) { + return false; + } + if (pAgg->pGroupKeys) { + return stbHasPartTag(pAgg->pGroupKeys) && + stbNotSystemScan((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0)); + } + return 
stbHasPartTag(stbGetPartKeys((SLogicNode*)nodesListGetNode(pAgg->node.pChildren, 0))); } bool isPartTableWinodw(SWindowLogicNode* pWindow) { - return stbHasPartTbname(stbSplGetPartKeys((SLogicNode*)nodesListGetNode(pWindow->node.pChildren, 0))); + return stbHasPartTbname(stbGetPartKeys((SLogicNode*)nodesListGetNode(pWindow->node.pChildren, 0))); } +bool cloneLimit(SLogicNode* pParent, SLogicNode* pChild, uint8_t cloneWhat) { + SLimitNode* pLimit; + bool cloned = false; + if (pParent->pLimit && (cloneWhat & CLONE_LIMIT)) { + pChild->pLimit = nodesCloneNode(pParent->pLimit); + pLimit = (SLimitNode*)pChild->pLimit; + pLimit->limit += pLimit->offset; + pLimit->offset = 0; + cloned = true; + } + if (pParent->pSlimit && (cloneWhat & CLONE_SLIMIT)) { + pChild->pSlimit = nodesCloneNode(pParent->pSlimit); + pLimit = (SLimitNode*)pChild->pSlimit; + pLimit->limit += pLimit->offset; + pLimit->offset = 0; + cloned = true; + } + return cloned; +}